repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
pmisik/buildbot | master/buildbot/test/fake/pbmanager.py | 5 | 1894 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.internet import defer
from buildbot.util import service
class FakePBManager(service.AsyncMultiService):
    """In-memory stand-in for the real PB manager used in tests.

    Records every registration and unregistration so tests can assert on
    them via the ``_registrations`` / ``_unregistrations`` lists.
    """

    def __init__(self):
        super().__init__()
        self.setName("fake-pbmanager")
        # list of (portstr, username, password) triples, one per register()
        self._registrations = []
        # list of (portstr, username) pairs, one per _unregister()
        self._unregistrations = []

    def register(self, portstr, username, password, pfactory):
        """Record a registration and return a Deferred FakeRegistration.

        Raises KeyError if the same (portstr, username) pair is already
        registered.
        """
        # BUG FIX: the original tested `(portstr, username) not in
        # self._registrations`, but the list stores 3-tuples that include the
        # password, so the membership test always failed and duplicate
        # registrations were silently accepted. Compare against the
        # (portstr, username) prefix of each stored triple instead.
        if any((p, u) == (portstr, username) for p, u, _ in self._registrations):
            raise KeyError("username '{}' is already registered on port {}".format(username,
                                                                                   portstr))
        reg = FakeRegistration(self, portstr, username)
        self._registrations.append((portstr, username, password))
        return defer.succeed(reg)

    def _unregister(self, portstr, username):
        """Record an unregistration; returns an already-fired Deferred."""
        self._unregistrations.append((portstr, username))
        return defer.succeed(None)
class FakeRegistration:
    """Handle returned by FakePBManager.register.

    Remembers which (portstr, username) pair it stands for and forwards
    unregister requests back to the owning manager.
    """

    def __init__(self, pbmanager, portstr, username):
        self._pbmanager = pbmanager
        self._portstr = portstr
        self._username = username

    def unregister(self):
        """Ask the owning manager to drop this registration."""
        self._pbmanager._unregister(self._portstr, self._username)
| gpl-2.0 |
NeostreamTechnology/Microservices | venv/lib/python2.7/site-packages/yaml/cyaml.py | 537 | 3290 |
__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
'CBaseDumper', 'CSafeDumper', 'CDumper']
from _yaml import CParser, CEmitter
from constructor import *
from serializer import *
from representer import *
from resolver import *
class CBaseLoader(CParser, BaseConstructor, BaseResolver):
    """Loader backed by the libyaml CParser, with only the base
    (tag-agnostic) constructor and resolver."""

    def __init__(self, stream):
        # Each base is initialized explicitly and in this order: CParser is
        # a C extension type and these classes do not use cooperative
        # super() initialization.
        CParser.__init__(self, stream)
        BaseConstructor.__init__(self)
        BaseResolver.__init__(self)
class CSafeLoader(CParser, SafeConstructor, Resolver):
    """Loader backed by the libyaml CParser that only constructs
    standard YAML tags (no arbitrary Python objects)."""

    def __init__(self, stream):
        # Explicit, ordered base initialization (no cooperative super()).
        CParser.__init__(self, stream)
        SafeConstructor.__init__(self)
        Resolver.__init__(self)
class CLoader(CParser, Constructor, Resolver):
    """Full-featured loader backed by the libyaml CParser, including
    construction of Python-specific tags."""

    def __init__(self, stream):
        # Explicit, ordered base initialization (no cooperative super()).
        CParser.__init__(self, stream)
        Constructor.__init__(self)
        Resolver.__init__(self)
class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
    """Dumper backed by the libyaml CEmitter with the base representer
    and resolver bases."""

    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        # Collect the emitter-level options in one place, then hand them
        # to the C emitter as keyword arguments.
        emitter_options = dict(
            canonical=canonical, indent=indent, width=width,
            encoding=encoding, allow_unicode=allow_unicode,
            line_break=line_break, explicit_start=explicit_start,
            explicit_end=explicit_end, version=version, tags=tags,
        )
        CEmitter.__init__(self, stream, **emitter_options)
        # Note: this class initializes Representer (not BaseRepresenter),
        # mirroring the behavior of the original code.
        Representer.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        Resolver.__init__(self)
class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
    """Dumper backed by the libyaml CEmitter that only represents
    standard YAML types."""

    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        # Gather emitter options and forward them to the C emitter.
        emitter_options = dict(
            canonical=canonical, indent=indent, width=width,
            encoding=encoding, allow_unicode=allow_unicode,
            line_break=line_break, explicit_start=explicit_start,
            explicit_end=explicit_end, version=version, tags=tags,
        )
        CEmitter.__init__(self, stream, **emitter_options)
        SafeRepresenter.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        Resolver.__init__(self)
class CDumper(CEmitter, Serializer, Representer, Resolver):
    """Full-featured dumper backed by the libyaml CEmitter, including
    representation of Python-specific types."""

    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        # Gather emitter options and forward them to the C emitter.
        emitter_options = dict(
            canonical=canonical, indent=indent, width=width,
            encoding=encoding, allow_unicode=allow_unicode,
            line_break=line_break, explicit_start=explicit_start,
            explicit_end=explicit_end, version=version, tags=tags,
        )
        CEmitter.__init__(self, stream, **emitter_options)
        Representer.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        Resolver.__init__(self)
| mit |
HEPData/hepdata3 | hepdata/modules/records/migrator/api.py | 1 | 18355 | # -*- coding: utf-8 -*-
#
# This file is part of HEPData.
# Copyright (C) 2016 CERN.
#
# HEPData is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# HEPData is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HEPData; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
from __future__ import absolute_import, print_function
import socket
from datetime import datetime, timedelta
from urllib2 import HTTPError
import requests
from celery import shared_task
from flask import current_app
import os
from invenio_db import db
from hepdata.ext.elasticsearch.api import get_records_matching_field, index_record_ids
from hepdata.modules.inspire_api.views import get_inspire_record_information
from hepdata.modules.dashboard.views import do_finalise
from hepdata.modules.records.utils.common import record_exists
from hepdata.modules.records.utils.submission import \
process_submission_directory, get_or_create_hepsubmission, \
remove_submission
from hepdata.modules.records.utils.workflow import create_record, update_record
import logging
from hepdata.modules.records.utils.yaml_utils import split_files
from hepdata.modules.submission.api import get_latest_hepsubmission, is_resource_added_to_submission
from hepdata.modules.submission.models import DataResource, HEPSubmission
from hepdata.utils.file_extractor import get_file_in_directory
from hepdata.modules.records.utils.doi_minter import generate_dois_for_submission
from hepdata.modules.email.api import notify_publication_update
logging.basicConfig()
log = logging.getLogger(__name__)
class FailedSubmission(Exception):
    """Raised when a submission cannot be processed.

    Carries the per-file validation errors and the id of the record the
    failure relates to, alongside the usual exception message.
    """

    def __init__(self, message, errors, record_id):
        # The Exception base keeps the human-readable message.
        super(FailedSubmission, self).__init__(message)
        # Mapping of file name -> list of error message strings.
        self.errors = errors
        # Record id the failed submission belongs to.
        self.record_id = record_id

    def print_errors(self):
        """Print every recorded error message, grouped by file."""
        for file_name in self.errors:
            print(file_name)
            for error_message in self.errors[file_name]:
                print("\t{0} for {1}".format(error_message, self.record_id))
@shared_task
def update_analyses():
    """Poll each configured analysis endpoint and attach any newly listed
    analyses as resources on the matching finished HEPData submissions.

    Endpoint configuration comes from ``ANALYSES_ENDPOINTS`` in the Flask
    app config; each endpoint entry is expected to provide an
    ``endpoint_url`` (JSON listing keyed by inspire id) and a
    ``url_template`` used to build the resource URL per analysis.
    """
    endpoints = current_app.config["ANALYSES_ENDPOINTS"]
    for analysis_endpoint in endpoints:
        if "endpoint_url" in endpoints[analysis_endpoint]:
            log.info("Updating analyses from {0}...".format(analysis_endpoint))
            response = requests.get(endpoints[analysis_endpoint]["endpoint_url"])
            # A requests.Response is falsy for error status codes, so this
            # also skips failed requests.
            if response:
                analyses = response.json()
                for record in analyses:
                    # Only finished submissions are eligible for new resources.
                    submission = get_latest_hepsubmission(inspire_id=record, overall_status='finished')
                    if submission:
                        num_new_resources = 0
                        for analysis in analyses[record]:
                            _resource_url = endpoints[analysis_endpoint]["url_template"].format(analysis)
                            # Skip resources already attached to this version.
                            if not is_resource_added_to_submission(submission.publication_recid, submission.version,
                                                                   _resource_url):
                                print('Adding {} analysis to ins{} with URL {}'
                                      .format(analysis_endpoint, record, _resource_url))
                                new_resource = DataResource(
                                    file_location=_resource_url,
                                    file_type=analysis_endpoint)
                                submission.resources.append(new_resource)
                                num_new_resources += 1
                        if num_new_resources:
                            # Persist and re-index only when something changed;
                            # roll back on any database failure.
                            try:
                                db.session.add(submission)
                                db.session.commit()
                                index_record_ids([submission.publication_recid])
                            except Exception as e:
                                db.session.rollback()
                                log.error(e)
                    else:
                        log.debug("An analysis is available in {0} but with no equivalent in HEPData (ins{1}).".format(
                            analysis_endpoint, record))
        else:
            log.debug("No endpoint url configured for {0}".format(analysis_endpoint))
@shared_task
def update_submissions(inspire_ids_to_update, force=False, only_record_information=False, send_email=False):
    """Queue an update for each inspire id that already exists in the index.

    :param inspire_ids_to_update: iterable of inspire ids (``insXXX`` format).
    :param force: forwarded to Migrator.update_file.
    :param only_record_information: forwarded to Migrator.update_file.
    :param send_email: forwarded to Migrator.update_file.
    """
    migrator = Migrator()
    for position, raw_id in enumerate(inspire_ids_to_update):
        cleaned_id = raw_id.replace("ins", "")
        matches = get_records_matching_field("inspire_id", cleaned_id)
        hits = matches["hits"]["hits"]
        if not hits:
            log.error("No record exists with id {0}. You should load this file first.".format(raw_id))
            continue
        top_source = hits[0]["_source"]
        # Prefer the parent publication's recid when this hit is a data record.
        recid = top_source["recid"]
        if "related_publication" in top_source:
            recid = top_source["related_publication"]
        print("The record with inspire_id {} and recid {} will be updated now".format(raw_id, recid))
        migrator.update_file.delay(raw_id, recid, force, only_record_information, send_email)
@shared_task
def add_or_update_records_since_date(date=None, send_tweet=False, convert=False):
    """Load every record added or updated in the old system since *date*.

    :param date: date string formatted as YYYYMMDD (e.g. 20160705 for
        5 July 2016); defaults to yesterday's date when falsy.
    :param send_tweet: forwarded to load_files.
    :param convert: forwarded to load_files.
    :return: None
    """
    if not date:
        # No date supplied: look back one day.
        yesterday = datetime.now() - timedelta(days=1)
        date = yesterday.strftime("%Y%m%d")
    ids_to_process = get_all_ids_in_current_system(date)
    print("{0} records to be added or updated since {1}.".format(len(ids_to_process), date))
    load_files(ids_to_process, send_tweet=send_tweet, convert=convert)
def get_all_ids_in_current_system(date=None, prepend_id_with="ins"):
    """Return the ids of all records added or updated since *date*.

    :param date: optional YYYYMMDD date string; when omitted, all ids
        known to the old system are returned.
    :param prepend_id_with: prefix attached to every returned id.
    :return: list of prefixed inspire id strings.
    """
    import requests, re

    url = "http://hepdata.cedar.ac.uk/allids/{0}".format(date if date else "")
    response = requests.get(url)

    inspire_ids = []
    if response.ok:
        strip_brackets = re.compile(r"\[+|\]+")
        # The endpoint returns blocks of the form [inspire_id,xxx,xxx].
        for match in re.finditer(r"\[[0-9]+,[0-9]+,[0-9]+\]", response.text):
            id_block = strip_brackets.sub("", match.group(0))
            record_id = id_block.split(",")[0].strip()
            # An id of "0" is a placeholder, not a real record.
            if record_id != "0":
                inspire_ids.append("{0}{1}".format(prepend_id_with, record_id))
    return inspire_ids
def load_files(inspire_ids, send_tweet=False, synchronous=False, convert=False,
               base_url='http://hepdata.cedar.ac.uk/view/{0}/yaml'):
    """Load each inspire record, or update it when it already exists.

    :param inspire_ids: iterable of inspire ids to load (``insXXX`` format).
    :param send_tweet: whether or not to tweet each entry.
    :param synchronous: run immediately instead of queueing celery tasks.
    :param convert: forwarded to the load/update tasks.
    :param base_url: override the default download URL template.
    :return: None
    """
    migrator = Migrator()
    for position, inspire_id in enumerate(inspire_ids):
        stripped_id = inspire_id.replace("ins", "")
        if record_exists(inspire_id=stripped_id):
            # Already in the database: route to the update path instead.
            print("The record with inspire id {0} already exists. Updating instead.".format(inspire_id))
            log.info("Updating {}".format(inspire_id))
            if synchronous:
                update_submissions([inspire_id])
            else:
                update_submissions.delay([inspire_id])
            continue
        print("The record with id {0} does not exist in the database, so we're loading it.".format(inspire_id))
        try:
            log.info("Loading {0}".format(inspire_id))
            # Either run the task inline or queue it on celery.
            loader = migrator.load_file if synchronous else migrator.load_file.delay
            loader(inspire_id, send_tweet, convert=convert, base_url=base_url)
        except socket.error as se:
            print("socket error...")
            log.error(se.message)
        except Exception as e:
            print("Failed to load {0}. {1} ".format(inspire_id, e))
            log.error("Failed to load {0}. {1} ".format(inspire_id, e))
class Migrator(object):
    """
    Performs the interface for all migration-related tasks including downloading, splitting files, YAML cleaning, and
    loading.
    """

    def __init__(self, base_url="http://hepdata.cedar.ac.uk/view/{0}/yaml"):
        # URL template for fetching a record's single-YAML export; formatted
        # with the inspire id in download_file().
        self.base_url = base_url

    def prepare_files_for_submission(self, inspire_id, force_retrieval=False):
        """
        Either returns a file if it already exists, or downloads it and
        splits it.

        :param inspire_id: inspire id of the record whose files are required.
        :param force_retrieval: when True, re-download even if a local copy exists.
        :return: (output location if successful or None, last-updated datetime)
        """
        output_location = os.path.join(current_app.config["CFG_DATADIR"], inspire_id)
        # Fallback timestamp used when no download/split happens.
        last_updated = datetime.now()
        # Download when the target directory is missing or holds no YAML file.
        download = not os.path.exists(output_location) or (get_file_in_directory(output_location, 'yaml') is None)
        if download or force_retrieval:
            print("Downloading file for {0}".format(inspire_id))
            file_location = self.download_file(inspire_id)
            if file_location:
                output_location = os.path.join(current_app.config["CFG_DATADIR"], inspire_id)
                error, last_updated = split_files(file_location, output_location, "{0}.zip".format(output_location))
                # remove temporary download file after processing
                try:
                    os.remove(file_location)
                except:
                    log.info('Unable to remove {0}'.format(file_location))
            else:
                # Download failed: signal failure to the caller.
                output_location = None
        else:
            print("File for {0} already in system...no download required.".format(inspire_id))
        return output_location, last_updated

    @shared_task
    def update_file(inspire_id, recid, force=False, only_record_information=False, send_email=False,
                    send_tweet=False, convert=False):
        """Celery task: refresh an existing record from the old site.

        NOTE: defined inside the class without ``self``; it constructs its
        own Migrator instance below (celery task pattern).
        """
        self = Migrator()
        # Always re-download so the comparison below uses fresh data.
        output_location, oldsite_last_updated = self.prepare_files_for_submission(inspire_id, force_retrieval=True)
        if output_location:
            updated_record_information, status = self.retrieve_publication_information(inspire_id)
            if status == 'success':
                record_information = update_record(recid, updated_record_information)
            else:
                log.error("Failed to retrieve publication information for {0}".format(inspire_id))
                return
            hep_submission = HEPSubmission.query.filter_by(publication_recid=recid).first()
            version_count = HEPSubmission.query.filter_by(publication_recid=recid).count()
            print('Old site last updated {}'.format(str(oldsite_last_updated)))
            print('New site last updated {}'.format(str(hep_submission.last_updated)))
            print('Coordinator ID is {}, version count is {}'.format(hep_submission.coordinator, version_count))
            # Only overwrite a submission that is stale (or forced), still
            # owned by the default coordinator, and has a single version.
            allow_update = (hep_submission.last_updated < oldsite_last_updated or force) and \
                hep_submission.coordinator == 1 and version_count == 1
            if not only_record_information and allow_update:
                try:
                    recid = self.load_submission(
                        record_information, output_location, os.path.join(output_location, "submission.yaml"),
                        update=True)
                    print('Loaded record {}'.format(recid))
                    if recid is not None:
                        do_finalise(recid, publication_record=record_information,
                                    force_finalise=True, send_tweet=send_tweet, update=True, convert=convert)
                except FailedSubmission as fe:
                    log.error(fe.message)
                    fe.print_errors()
                    remove_submission(fe.record_id)
            elif not only_record_information:
                print('Not updating record {}'.format(recid))
            else:
                # Record-information-only update: just refresh the index.
                index_record_ids([record_information["recid"]])
            _cleaned_id = inspire_id.replace("ins", "")
            generate_dois_for_submission.delay(inspire_id=_cleaned_id)  # update metadata stored in DataCite
            if send_email:
                notify_publication_update(hep_submission, record_information)  # send email to all participants
        else:
            log.error("Failed to load {0}".format(inspire_id))

    @shared_task
    def load_file(inspire_id, send_tweet=False, convert=False, base_url='http://hepdata.cedar.ac.uk/view/{0}/yaml'):
        """Celery task: download, split, load, and finalise a new record.

        Returns True on success, False on any failure. Defined inside the
        class without ``self``; constructs its own Migrator instance below.
        """
        self = Migrator(base_url)
        output_location, oldsite_last_updated = self.prepare_files_for_submission(inspire_id)
        if output_location:
            publication_information, status = self.retrieve_publication_information(inspire_id)
            if status == "success":
                record_information = create_record(publication_information)
            else:
                log.error("Failed to retrieve publication information for " + inspire_id)
                return False
            try:
                recid = self.load_submission(
                    record_information, output_location,
                    os.path.join(output_location, "submission.yaml"))
                if recid is not None:
                    do_finalise(recid, publication_record=record_information,
                                force_finalise=True, send_tweet=send_tweet, convert=convert)
                    return True
            except FailedSubmission as fe:
                log.error(fe.message)
                fe.print_errors()
                # Clean up the partially loaded submission.
                remove_submission(fe.record_id)
                return False
        else:
            log.error("Failed to load " + inspire_id)
            return False

    def download_file(self, inspire_id):
        """
        Download the single-YAML export for *inspire_id* to a temp file.

        A local copy in this module's ``static`` directory takes precedence
        over the remote endpoint.

        :param inspire_id: inspire id of the record to download.
        :return: path to the temporary file, or None on failure.
        """
        import requests
        import tempfile
        from shutil import copyfile

        # Check if single YAML file exists in static directory.
        base_dir = os.path.dirname(os.path.realpath(__file__))
        yaml_file = os.path.join(base_dir, 'static', inspire_id + '.yaml')
        if os.path.isfile(yaml_file):
            print("Found {}".format(yaml_file))
            # Copy to a temp file so callers can delete it after processing.
            tmp_file = tempfile.NamedTemporaryFile(dir=current_app.config["CFG_TMPDIR"], delete=False)
            tmp_file.close()
            copyfile(yaml_file, tmp_file.name)
            return tmp_file.name
        try:
            url = self.base_url.format(inspire_id)
            log.info("Trying URL " + url)
            response = requests.get(url)
            if response.ok:
                yaml = response.text
                # save to tmp file
                tmp_file = tempfile.NamedTemporaryFile(dir=current_app.config["CFG_TMPDIR"],
                                                       delete=False)
                tmp_file.write(yaml)
                tmp_file.close()
                return tmp_file.name
            else:
                log.error('Non OK response from endpoint at {0}'.format(url))
                return None
        except HTTPError as e:
            log.error("Failed to download {0}".format(inspire_id))
            log.error(e.message)
            return None

    def retrieve_publication_information(self, inspire_id):
        """
        :param inspire_id: id for record to get. If this contains "ins", the "ins" is removed.
        :return: (content dict, status string); content contains keys for:
            title
            doi
            authors
            abstract
            arxiv_id
            collaboration
            inspire_id
        """
        if "ins" in inspire_id:
            inspire_id = int(inspire_id.replace("ins", ""))
        content, status = get_inspire_record_information(inspire_id)
        # Echo the id back so callers can index on it.
        content["inspire_id"] = inspire_id
        return content, status

    def load_submission(self, record_information, file_base_path,
                        submission_yaml_file_location, update=False):
        """
        Process a split submission directory into the database.

        :param record_information: dict holding at least the "recid" key.
        :param file_base_path: directory containing the split data files.
        :param submission_yaml_file_location: path to submission.yaml.
        :param update: True when refreshing an existing submission.
        :return: the recid on success.
        :raises FailedSubmission: when the submission directory has errors.
        """
        # create publication record.
        # load data tables
        # create data table records (call finalise(recid))
        admin_user_id = 1
        # consume data payload and store in db.
        get_or_create_hepsubmission(record_information["recid"], admin_user_id)
        errors = process_submission_directory(file_base_path,
                                              submission_yaml_file_location,
                                              record_information["recid"], update=update)
        if len(errors) > 0:
            print("ERRORS ARE: ")
            print(errors)
        if errors:
            raise FailedSubmission("Submission failed for {0}.".format(
                record_information["recid"]), errors,
                record_information["recid"])
        else:
            return record_information["recid"]
| gpl-2.0 |
wbyne/QGIS | python/ext-libs/pygments/lexers/go.py | 47 | 3701 | # -*- coding: utf-8 -*-
"""
pygments.lexers.go
~~~~~~~~~~~~~~~~~~
Lexers for the Google Go language.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['GoLexer']
class GoLexer(RegexLexer):
    """
    For `Go <http://golang.org>`_ source.

    .. versionadded:: 1.2
    """
    name = 'Go'
    filenames = ['*.go']
    aliases = ['go']
    mimetypes = ['text/x-gosrc']

    flags = re.MULTILINE | re.UNICODE

    # Single 'root' state: Go has no lexer modes that require state
    # transitions here; every rule yields a token and stays in 'root'.
    tokens = {
        'root': [
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),  # line continuations
            (r'//(.*?)\n', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'(import|package)\b', Keyword.Namespace),
            (r'(var|func|struct|map|chan|type|interface|const)\b',
             Keyword.Declaration),
            (words((
                'break', 'default', 'select', 'case', 'defer', 'go',
                'else', 'goto', 'switch', 'fallthrough', 'if', 'range',
                'continue', 'for', 'return'), suffix=r'\b'),
             Keyword),
            (r'(true|false|iota|nil)\b', Keyword.Constant),
            # It seems the builtin types aren't actually keywords, but
            # can be used as functions. So we need two declarations.
            # First: builtin name immediately followed by '(' -> function use.
            (words((
                'uint', 'uint8', 'uint16', 'uint32', 'uint64',
                'int', 'int8', 'int16', 'int32', 'int64',
                'float', 'float32', 'float64',
                'complex64', 'complex128', 'byte', 'rune',
                'string', 'bool', 'error', 'uintptr',
                'print', 'println', 'panic', 'recover', 'close', 'complex',
                'real', 'imag', 'len', 'cap', 'append', 'copy', 'delete',
                'new', 'make'), suffix=r'\b(\()'),
             bygroups(Name.Builtin, Punctuation)),
            # Second: bare builtin type name -> type use.
            (words((
                'uint', 'uint8', 'uint16', 'uint32', 'uint64',
                'int', 'int8', 'int16', 'int32', 'int64',
                'float', 'float32', 'float64',
                'complex64', 'complex128', 'byte', 'rune',
                'string', 'bool', 'error', 'uintptr'), suffix=r'\b'),
             Keyword.Type),
            # imaginary_lit
            (r'\d+i', Number),
            (r'\d+\.\d*([Ee][-+]\d+)?i', Number),
            (r'\.\d+([Ee][-+]\d+)?i', Number),
            (r'\d+[Ee][-+]\d+i', Number),
            # float_lit
            (r'\d+(\.\d+[eE][+\-]?\d+|'
             r'\.\d*|[eE][+\-]?\d+)', Number.Float),
            (r'\.\d+([eE][+\-]?\d+)?', Number.Float),
            # int_lit
            # -- octal_lit
            (r'0[0-7]+', Number.Oct),
            # -- hex_lit
            (r'0[xX][0-9a-fA-F]+', Number.Hex),
            # -- decimal_lit
            (r'(0|[1-9][0-9]*)', Number.Integer),
            # char_lit
            (r"""'(\\['"\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
             r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|[^\\])'""",
             String.Char),
            # StringLiteral
            # -- raw_string_lit
            (r'`[^`]*`', String),
            # -- interpreted_string_lit
            (r'"(\\\\|\\"|[^"])*"', String),
            # Tokens
            (r'(<<=|>>=|<<|>>|<=|>=|&\^=|&\^|\+=|-=|\*=|/=|%=|&=|\|=|&&|\|\|'
             r'|<-|\+\+|--|==|!=|:=|\.\.\.|[+\-*/%&])', Operator),
            (r'[|^<>=!()\[\]{}.,;:]', Punctuation),
            # identifier
            (r'[^\W\d]\w*', Name.Other),
        ]
    }
| gpl-2.0 |
jumpstarter-io/horizon | openstack_dashboard/dashboards/admin/images/tests.py | 4 | 8137 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.core.urlresolvers import reverse
from django import http
from django.test.utils import override_settings
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.admin.images import tables
class ImageCreateViewTest(test.BaseAdminViewTests):
    # View tests for the admin image-creation page.

    def test_admin_image_create_view_uses_admin_template(self):
        # The admin create view must render the admin-specific template,
        # not the shared project one.
        res = self.client.get(
            reverse('horizon:admin:images:create'))
        self.assertTemplateUsed(res, 'admin/images/create.html')
class ImagesViewTest(test.BaseAdminViewTests):
    # Tests for the admin images index view. The glance API is stubbed with
    # mox; note that recorded expectations are consumed strictly in order,
    # one per client request made after ReplayAll().

    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def test_images_list(self):
        filters = {'is_public': None}
        # Single expectation: the index view lists all images in one page.
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=None,
                                       paginate=True,
                                       filters=filters,
                                       sort_dir='desc') \
            .AndReturn([self.images.list(),
                        False, False])
        self.mox.ReplayAll()

        res = self.client.get(
            reverse('horizon:admin:images:index'))
        self.assertTemplateUsed(res, 'admin/images/index.html')
        self.assertEqual(len(res.context['images_table'].data),
                         len(self.images.list()))

    @override_settings(API_RESULT_PAGE_SIZE=2)
    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def test_images_list_get_pagination(self):
        # Five images with a page size of two -> three pages (2, 2, 1).
        images = self.images.list()[:5]
        filters = {'is_public': None}
        # Expectation 1: initial request returns everything (has_more set).
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=None,
                                       paginate=True,
                                       filters=filters,
                                       sort_dir='desc') \
            .AndReturn([images,
                        True, True])
        # Expectation 2: first page.
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=None,
                                       paginate=True,
                                       filters=filters,
                                       sort_dir='desc') \
            .AndReturn([images[:2],
                        True, True])
        # Expectation 3: second page, marker after item 3.
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=images[2].id,
                                       paginate=True,
                                       filters=filters,
                                       sort_dir='desc') \
            .AndReturn([images[2:4],
                        True, True])
        # Expectation 4: third page, marker after item 5.
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=images[4].id,
                                       paginate=True,
                                       filters=filters,
                                       sort_dir='desc') \
            .AndReturn([images[4:],
                        True, True])
        self.mox.ReplayAll()

        url = reverse('horizon:admin:images:index')
        res = self.client.get(url)
        # get all
        self.assertEqual(len(res.context['images_table'].data),
                         len(images))
        self.assertTemplateUsed(res, 'admin/images/index.html')

        res = self.client.get(url)
        # get first page with 2 items
        self.assertEqual(len(res.context['images_table'].data),
                         settings.API_RESULT_PAGE_SIZE)

        url = "?".join([reverse('horizon:admin:images:index'),
                        "=".join([tables.AdminImagesTable._meta.pagination_param,
                                  images[2].id])])
        res = self.client.get(url)
        # get second page (items 2-4)
        self.assertEqual(len(res.context['images_table'].data),
                         settings.API_RESULT_PAGE_SIZE)

        url = "?".join([reverse('horizon:admin:images:index'),
                        "=".join([tables.AdminImagesTable._meta.pagination_param,
                                  images[4].id])])
        res = self.client.get(url)
        # get third page (item 5)
        self.assertEqual(len(res.context['images_table'].data),
                         1)

    @override_settings(API_RESULT_PAGE_SIZE=2)
    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def test_images_list_get_prev_pagination(self):
        # Three images with a page size of two -> two pages; the last
        # expectation (sort_dir='asc') models the "previous page" request.
        images = self.images.list()[:3]
        filters = {'is_public': None}
        # Expectation 1: initial request returns everything.
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=None,
                                       paginate=True,
                                       filters=filters,
                                       sort_dir='desc') \
            .AndReturn([images,
                        True, False])
        # Expectation 2: first page.
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=None,
                                       paginate=True,
                                       filters=filters,
                                       sort_dir='desc') \
            .AndReturn([images[:2],
                        True, True])
        # Expectation 3: second page, marker after item 3.
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=images[2].id,
                                       paginate=True,
                                       filters=filters,
                                       sort_dir='desc') \
            .AndReturn([images[2:],
                        True, True])
        # Expectation 4: "previous" request walks backwards (ascending).
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=images[2].id,
                                       paginate=True,
                                       filters=filters,
                                       sort_dir='asc') \
            .AndReturn([images[:2],
                        True, True])
        self.mox.ReplayAll()

        url = reverse('horizon:admin:images:index')
        res = self.client.get(url)
        # get all
        self.assertEqual(len(res.context['images_table'].data),
                         len(images))
        self.assertTemplateUsed(res, 'admin/images/index.html')

        res = self.client.get(url)
        # get first page with 2 items
        self.assertEqual(len(res.context['images_table'].data),
                         settings.API_RESULT_PAGE_SIZE)

        url = "?".join([reverse('horizon:admin:images:index'),
                        "=".join([tables.AdminImagesTable._meta.pagination_param,
                                  images[2].id])])
        res = self.client.get(url)
        # get second page (item 3)
        self.assertEqual(len(res.context['images_table'].data), 1)

        url = "?".join([reverse('horizon:admin:images:index'),
                        "=".join([tables.AdminImagesTable._meta.prev_pagination_param,
                                  images[2].id])])
        res = self.client.get(url)
        # prev back to get first page with 2 items
        self.assertEqual(len(res.context['images_table'].data),
                         settings.API_RESULT_PAGE_SIZE)
| apache-2.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/django/db/backends/sqlite3/features.py | 49 | 2641 | from __future__ import unicode_literals
from django.db import utils
from django.db.backends.base.features import BaseDatabaseFeatures
from django.utils import six
from django.utils.functional import cached_property
from .base import Database
class DatabaseFeatures(BaseDatabaseFeatures):
    # Capability flags describing what the SQLite backend supports; each
    # overrides a default declared on BaseDatabaseFeatures.

    # SQLite cannot handle us only partially reading from a cursor's result set
    # and then writing the same rows to the database in another cursor. This
    # setting ensures we always read result sets fully into memory all in one
    # go.
    can_use_chunked_reads = False
    test_db_allows_multiple_connections = False
    supports_unspecified_pk = True
    supports_timezones = False
    supports_1000_query_parameters = False
    supports_mixed_date_datetime_comparisons = False
    has_bulk_insert = True
    supports_foreign_keys = False
    supports_column_check_constraints = False
    autocommits_when_autocommit_is_off = True
    can_introspect_decimal_field = False
    can_introspect_positive_integer_field = True
    can_introspect_small_integer_field = True
    supports_transactions = True
    atomic_transactions = False
    can_rollback_ddl = True
    supports_paramstyle_pyformat = False
    supports_sequence_reset = False
    can_clone_databases = True
    supports_temporal_subtraction = True
    ignores_table_name_case = True

    # The properties below gate features on the version of the linked
    # SQLite library at runtime.

    @cached_property
    def uses_savepoints(self):
        # SAVEPOINT support requires SQLite 3.6.8+.
        return Database.sqlite_version_info >= (3, 6, 8)

    @cached_property
    def supports_index_column_ordering(self):
        return Database.sqlite_version_info >= (3, 3, 0)

    @cached_property
    def can_release_savepoints(self):
        # Releasing savepoints is possible exactly when savepoints are.
        return self.uses_savepoints

    @cached_property
    def can_share_in_memory_db(self):
        # Shared in-memory databases need Python 3, the stdlib sqlite3
        # driver, and SQLite 3.7.13+ (URI filename support).
        return (
            six.PY3 and
            Database.__name__ == 'sqlite3.dbapi2' and
            Database.sqlite_version_info >= (3, 7, 13)
        )

    @cached_property
    def supports_stddev(self):
        """Confirm support for STDDEV and related stats functions

        SQLite supports STDDEV as an extension package; so
        connection.ops.check_expression_support() can't unilaterally
        rule out support for STDDEV. We need to manually check
        whether the call works.
        """
        # Probe by actually executing STDDEV against a throwaway table.
        with self.connection.cursor() as cursor:
            cursor.execute('CREATE TABLE STDDEV_TEST (X INT)')
            try:
                cursor.execute('SELECT STDDEV(*) FROM STDDEV_TEST')
                has_support = True
            except utils.DatabaseError:
                has_support = False
            cursor.execute('DROP TABLE STDDEV_TEST')
        return has_support
| gpl-3.0 |
ryfeus/lambda-packs | Keras_tensorflow/source/tensorflow/contrib/learn/python/learn/ops/__init__.py | 124 | 1104 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various TensorFlow Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn.ops.embeddings_ops import *
from tensorflow.contrib.learn.python.learn.ops.losses_ops import *
from tensorflow.contrib.learn.python.learn.ops.seq2seq_ops import *
# pylint: enable=wildcard-import
| mit |
derekjanni/spyre | tests/cptestcase.py | 3 | 3887 | # -*- coding: utf-8 -*-
# from StringIO import StringIO
from io import StringIO
import unittest
try:
import urllib.parse as urllib
except ImportError:
import urllib
import cherrypy
# Not strictly speaking mandatory, but it makes sense to run in the
# test-suite environment (disables logging noise, autoreload, etc.).
cherrypy.config.update({'environment': "test_suite"})

# This is mandatory so that the HTTP server isn't started.
# If you need to actually start it (why would you?), simply
# subscribe it back.
cherrypy.server.unsubscribe()

# Simulate fake socket addresses... they are irrelevant in our context,
# but CherryPy's request machinery requires host objects to exist.
local = cherrypy.lib.httputil.Host('127.0.0.1', 50000, "")
remote = cherrypy.lib.httputil.Host('127.0.0.1', 50001, "")

__all__ = ['BaseCherryPyTestCase']
class BaseCherryPyTestCase(unittest.TestCase):
    """Serverless test case for CherryPy applications.

    Requests are dispatched straight into CherryPy's internal request
    machinery, so no socket or HTTP server is involved.
    """
    def request(self, path='/', method='GET', app_path='', scheme='http',
                proto='HTTP/1.1', data=None, headers=None, **kwargs):
        """
        CherryPy does not have a facility for serverless unit testing.
        However this recipe demonstrates a way of doing it by
        calling its internal API to simulate an incoming request.
        This will exercise the whole stack from there.
        Remember a couple of things:
        * CherryPy is multithreaded. The response you will get
          from this method is a thread-data object attached to
          the current thread. Unless you use many threads from
          within a unit test, you can mostly forget
          about the thread data aspect of the response.
        * Responses are dispatched to a mounted application's
          page handler, if found. This is the reason why you
          must indicate which app you are targetting with
          this request by specifying its mount point.
        You can simulate various request settings by setting
        the `headers` parameter to a dictionary of headers,
        the request's `scheme` or `protocol`.

        Raises AssertionError when no app is mounted at `app_path`
        or when the handler answers with a 500 status.

        .. seealso: http://docs.cherrypy.org/stable/refman/_cprequest.html#cherrypy._cprequest.Response
        """
        # This is a required header when running HTTP/1.1
        h = {'Host': '127.0.0.1'}
        if headers is not None:
            h.update(headers)
        # If we have a POST/PUT request but no data
        # we urlencode the named arguments in **kwargs
        # and set the content-type header
        if method in ('POST', 'PUT') and not data:
            data = urllib.urlencode(kwargs)
            kwargs = None
            h['content-type'] = 'application/x-www-form-urlencoded'
        # If we did have named arguments, let's
        # urlencode them and use them as a querystring
        qs = None
        if kwargs:
            qs = urllib.urlencode(kwargs)
        # if we had some data passed as the request entity
        # let's make sure we have the content-length set
        fd = None
        if data is not None:
            h['content-length'] = '%d' % len(data)
            fd = StringIO(data)
        # Get our application and run the request against it
        app = cherrypy.tree.apps.get(app_path)
        if not app:
            # XXX: perhaps not the best exception to raise?
            raise AssertionError("No application mounted at '%s'" % app_path)
        # Cleanup any previous returned response
        # between calls to this method
        app.release_serving()
        # Let's fake the local and remote addresses
        request, response = app.get_serving(local, remote, scheme, proto)
        try:
            h = [(k, v) for k, v in h.items()]
            response = request.run(method, path, qs, proto, h, fd)
        finally:
            if fd:
                fd.close()
                fd = None
        # Surface server-side failures with the actual status line instead
        # of a bare "Unexpected error" (startswith is clearer than find()==0).
        status = response.output_status.decode('utf8')
        if status.startswith('500'):
            raise AssertionError("Unexpected error: %s" % status)
        # collapse the response into a bytestring
        response.collapse_body()
        return response
nvoron23/brython | src/Lib/random.py | 104 | 25660 | """Random variable generators.
integers
--------
uniform within range
sequences
---------
pick random element
pick random sample
generate random permutation
distributions on the real line:
------------------------------
uniform
triangular
normal (Gaussian)
lognormal
negative exponential
gamma
beta
pareto
Weibull
distributions on the circle (angles 0 to 2pi)
---------------------------------------------
circular uniform
von Mises
General notes on the underlying Mersenne Twister core generator:
* The period is 2**19937-1.
* It is one of the most extensively tested generators in existence.
* The random() method is implemented in C, executes in a single Python step,
and is, therefore, threadsafe.
"""
from warnings import warn as _warn
from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType
from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil
from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
from os import urandom as _urandom
from collections.abc import Set as _Set, Sequence as _Sequence
from hashlib import sha512 as _sha512
__all__ = ["Random","seed","random","uniform","randint","choice","sample",
           "randrange","shuffle","normalvariate","lognormvariate",
           "expovariate","vonmisesvariate","gammavariate","triangular",
           "gauss","betavariate","paretovariate","weibullvariate",
           "getstate","setstate", "getrandbits",
           "SystemRandom"]
# Constants precomputed once at import time for the distribution methods.
NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)  # normalvariate() rejection bound
TWOPI = 2.0*_pi
LOG4 = _log(4.0)
SG_MAGICCONST = 1.0 + _log(4.5)  # gammavariate() squeeze constant
BPF = 53        # Number of bits in a float
RECIP_BPF = 2**-BPF
# Translated by Guido van Rossum from C source provided by
# Adrian Baddeley.  Adapted by Raymond Hettinger for use with
# the Mersenne Twister  and os.urandom() core generators.
import _random
class Random(_random.Random):
    """Random number generator base class used by bound module functions.
    Used to instantiate instances of Random to get generators that don't
    share state.
    Class Random can also be subclassed if you want to use a different basic
    generator of your own devising: in that case, override the following
    methods:  random(), seed(), getstate(), and setstate().
    Optionally, implement a getrandbits() method so that randrange()
    can cover arbitrarily large ranges.
    """
    VERSION = 3     # used by getstate/setstate
    def __init__(self, x=None):
        """Initialize an instance.
        Optional argument x controls seeding, as for Random.seed().
        """
        self.seed(x)
        self.gauss_next = None
    def seed(self, a=None, version=2):
        """Initialize internal state from hashable object.
        None or no argument seeds from current time or from an operating
        system specific randomness source if available.
        For version 2 (the default), all of the bits are used if *a* is a str,
        bytes, or bytearray.  For version 1, the hash() of *a* is used instead.
        If *a* is an int, all bits are used.
        """
        if a is None:
            try:
                a = int.from_bytes(_urandom(32), 'big')
            except NotImplementedError:
                import time
                a = int(time.time() * 256) # use fractional seconds
        if version == 2:
            if isinstance(a, (str, bytes, bytearray)):
                if isinstance(a, str):
                    a = a.encode()
                # Append the sha512 digest so that similar strings still
                # produce very different integer seeds.
                a += _sha512(a).digest()
                a = int.from_bytes(a, 'big')
        super().seed(a)
        self.gauss_next = None
    def getstate(self):
        """Return internal state; can be passed to setstate() later."""
        return self.VERSION, super().getstate(), self.gauss_next
    def setstate(self, state):
        """Restore internal state from object returned by getstate()."""
        version = state[0]
        if version == 3:
            version, internalstate, self.gauss_next = state
            super().setstate(internalstate)
        elif version == 2:
            version, internalstate, self.gauss_next = state
            # In version 2, the state was saved as signed ints, which causes
            #   inconsistencies between 32/64-bit systems. The state is
            #   really unsigned 32-bit ints, so we convert negative ints from
            #   version 2 to positive longs for version 3.
            try:
                internalstate = tuple(x % (2**32) for x in internalstate)
            except ValueError as e:
                raise TypeError from e
            super().setstate(internalstate)
        else:
            raise ValueError("state with version %s passed to "
                             "Random.setstate() of version %s" %
                             (version, self.VERSION))
    ## ---- Methods below this point do not need to be overridden when
    ## ---- subclassing for the purpose of using a different core generator.
    ## -------------------- pickle support  -------------------
    def __getstate__(self): # for pickle
        return self.getstate()
    def __setstate__(self, state):  # for pickle
        self.setstate(state)
    def __reduce__(self):
        return self.__class__, (), self.getstate()
    ## -------------------- integer methods  -------------------
    def randrange(self, start, stop=None, step=1, _int=int):
        """Choose a random item from range(start, stop[, step]).
        This fixes the problem with randint() which includes the
        endpoint; in Python this is usually not what you want.
        """
        # This code is a bit messy to make it fast for the
        # common case while still doing adequate error checking.
        istart = _int(start)
        if istart != start:
            raise ValueError("non-integer arg 1 for randrange()")
        if stop is None:
            if istart > 0:
                return self._randbelow(istart)
            raise ValueError("empty range for randrange()")
        # stop argument supplied.
        istop = _int(stop)
        if istop != stop:
            raise ValueError("non-integer stop for randrange()")
        width = istop - istart
        if step == 1 and width > 0:
            return istart + self._randbelow(width)
        if step == 1:
            raise ValueError("empty range for randrange() (%d,%d, %d)" % (istart, istop, width))
        # Non-unit step argument supplied.
        istep = _int(step)
        if istep != step:
            raise ValueError("non-integer step for randrange()")
        if istep > 0:
            n = (width + istep - 1) // istep
        elif istep < 0:
            n = (width + istep + 1) // istep
        else:
            raise ValueError("zero step for randrange()")
        if n <= 0:
            raise ValueError("empty range for randrange()")
        return istart + istep*self._randbelow(n)
    def randint(self, a, b):
        """Return random integer in range [a, b], including both end points.
        """
        return self.randrange(a, b+1)
    def _randbelow(self, n, int=int, maxsize=1<<BPF, type=type,
                   Method=_MethodType, BuiltinMethod=_BuiltinMethodType):
        "Return a random int in the range [0,n).  Raises ValueError if n==0."
        getrandbits = self.getrandbits
        # Only call self.getrandbits if the original random() builtin method
        # has not been overridden or if a new getrandbits() was supplied.
        if type(self.random) is BuiltinMethod or type(getrandbits) is Method:
            k = n.bit_length()  # don't use (n-1) here because n can be 1
            r = getrandbits(k)          # 0 <= r < 2**k
            # Rejection sampling: retry rather than take r % n, which would
            # bias the distribution toward smaller values.
            while r >= n:
                r = getrandbits(k)
            return r
        # There's an overridden random() method but no new getrandbits() method,
        # so we can only use random() from here.
        random = self.random
        if n >= maxsize:
            _warn("Underlying random() generator does not supply \n"
                "enough bits to choose from a population range this large.\n"
                "To remove the range limitation, add a getrandbits() method.")
            return int(random() * n)
        rem = maxsize % n
        limit = (maxsize - rem) / maxsize   # int(limit * maxsize) % n == 0
        r = random()
        while r >= limit:
            r = random()
        return int(r*maxsize) % n
    ## -------------------- sequence methods  -------------------
    def choice(self, seq):
        """Choose a random element from a non-empty sequence."""
        try:
            i = self._randbelow(len(seq))
        except ValueError:
            raise IndexError('Cannot choose from an empty sequence')
        return seq[i]
    def shuffle(self, x, random=None):
        """x, random=random.random -> shuffle list x in place; return None.
        Optional arg random is a 0-argument function returning a random
        float in [0.0, 1.0); by default, the standard random.random.
        """
        # Fisher-Yates shuffle: walk backward, swapping each slot with a
        # uniformly chosen slot at or below it.
        if random is None:
            randbelow = self._randbelow
            for i in reversed(range(1, len(x))):
                # pick an element in x[:i+1] with which to exchange x[i]
                j = randbelow(i+1)
                x[i], x[j] = x[j], x[i]
        else:
            _int = int
            for i in reversed(range(1, len(x))):
                # pick an element in x[:i+1] with which to exchange x[i]
                j = _int(random() * (i+1))
                x[i], x[j] = x[j], x[i]
    def sample(self, population, k):
        """Chooses k unique random elements from a population sequence or set.
        Returns a new list containing elements from the population while
        leaving the original population unchanged.  The resulting list is
        in selection order so that all sub-slices will also be valid random
        samples.  This allows raffle winners (the sample) to be partitioned
        into grand prize and second place winners (the subslices).
        Members of the population need not be hashable or unique.  If the
        population contains repeats, then each occurrence is a possible
        selection in the sample.
        To choose a sample in a range of integers, use range as an argument.
        This is especially fast and space efficient for sampling from a
        large population:   sample(range(10000000), 60)
        """
        # Sampling without replacement entails tracking either potential
        # selections (the pool) in a list or previous selections in a set.
        # When the number of selections is small compared to the
        # population, then tracking selections is efficient, requiring
        # only a small set and an occasional reselection.  For
        # a larger number of selections, the pool tracking method is
        # preferred since the list takes less space than the
        # set and it doesn't suffer from frequent reselections.
        if isinstance(population, _Set):
            population = tuple(population)
        if not isinstance(population, _Sequence):
            raise TypeError("Population must be a sequence or set.  For dicts, use list(d).")
        randbelow = self._randbelow
        n = len(population)
        if not 0 <= k <= n:
            raise ValueError("Sample larger than population")
        result = [None] * k
        setsize = 21        # size of a small set minus size of an empty list
        if k > 5:
            setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
        if n <= setsize:
            # An n-length list is smaller than a k-length set
            pool = list(population)
            for i in range(k):         # invariant:  non-selected at [0,n-i)
                j = randbelow(n-i)
                result[i] = pool[j]
                pool[j] = pool[n-i-1]   # move non-selected item into vacancy
        else:
            selected = set()
            selected_add = selected.add
            for i in range(k):
                j = randbelow(n)
                while j in selected:
                    j = randbelow(n)
                selected_add(j)
                result[i] = population[j]
        return result
    ## -------------------- real-valued distributions  -------------------
    ## -------------------- uniform distribution -------------------
    def uniform(self, a, b):
        "Get a random number in the range [a, b) or [a, b] depending on rounding."
        return a + (b-a) * self.random()
    ## -------------------- triangular --------------------
    def triangular(self, low=0.0, high=1.0, mode=None):
        """Triangular distribution.
        Continuous distribution bounded by given lower and upper limits,
        and having a given mode value in-between.
        http://en.wikipedia.org/wiki/Triangular_distribution
        """
        u = self.random()
        c = 0.5 if mode is None else (mode - low) / (high - low)
        if u > c:
            # Reflect the problem so the same formula covers both sides
            # of the mode.
            u = 1.0 - u
            c = 1.0 - c
            low, high = high, low
        return low + (high - low) * (u * c) ** 0.5
    ## -------------------- normal distribution --------------------
    def normalvariate(self, mu, sigma):
        """Normal distribution.
        mu is the mean, and sigma is the standard deviation.
        """
        # mu = mean, sigma = standard deviation
        # Uses Kinderman and Monahan method. Reference: Kinderman,
        # A.J. and Monahan, J.F., "Computer generation of random
        # variables using the ratio of uniform deviates", ACM Trans
        # Math Software, 3, (1977), pp257-260.
        random = self.random
        while 1:
            u1 = random()
            u2 = 1.0 - random()
            z = NV_MAGICCONST*(u1-0.5)/u2
            zz = z*z/4.0
            if zz <= -_log(u2):
                break
        return mu + z*sigma
    ## -------------------- lognormal distribution --------------------
    def lognormvariate(self, mu, sigma):
        """Log normal distribution.
        If you take the natural logarithm of this distribution, you'll get a
        normal distribution with mean mu and standard deviation sigma.
        mu can have any value, and sigma must be greater than zero.
        """
        return _exp(self.normalvariate(mu, sigma))
    ## -------------------- exponential distribution --------------------
    def expovariate(self, lambd):
        """Exponential distribution.
        lambd is 1.0 divided by the desired mean.  It should be
        nonzero.  (The parameter would be called "lambda", but that is
        a reserved word in Python.)  Returned values range from 0 to
        positive infinity if lambd is positive, and from negative
        infinity to 0 if lambd is negative.
        """
        # lambd: rate lambd = 1/mean
        # ('lambda' is a Python reserved word)
        # we use 1-random() instead of random() to preclude the
        # possibility of taking the log of zero.
        return -_log(1.0 - self.random())/lambd
    ## -------------------- von Mises distribution --------------------
    def vonmisesvariate(self, mu, kappa):
        """Circular data distribution.
        mu is the mean angle, expressed in radians between 0 and 2*pi, and
        kappa is the concentration parameter, which must be greater than or
        equal to zero.  If kappa is equal to zero, this distribution reduces
        to a uniform random angle over the range 0 to 2*pi.
        """
        # mu:    mean angle (in radians between 0 and 2*pi)
        # kappa: concentration parameter kappa (>= 0)
        # if kappa = 0 generate uniform random angle
        # Based upon an algorithm published in: Fisher, N.I.,
        # "Statistical Analysis of Circular Data", Cambridge
        # University Press, 1993.
        # Thanks to Magnus Kessler for a correction to the
        # implementation of step 4.
        random = self.random
        if kappa <= 1e-6:
            return TWOPI * random()
        s = 0.5 / kappa
        r = s + _sqrt(1.0 + s * s)
        while 1:
            u1 = random()
            z = _cos(_pi * u1)
            d = z / (r + z)
            u2 = random()
            if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d):
                break
        q = 1.0 / r
        f = (q + z) / (1.0 + q * z)
        u3 = random()
        if u3 > 0.5:
            theta = (mu + _acos(f)) % TWOPI
        else:
            theta = (mu - _acos(f)) % TWOPI
        return theta
    ## -------------------- gamma distribution --------------------
    def gammavariate(self, alpha, beta):
        """Gamma distribution.  Not the gamma function!
        Conditions on the parameters are alpha > 0 and beta > 0.
        The probability distribution function is:
                    x ** (alpha - 1) * math.exp(-x / beta)
          pdf(x) =  --------------------------------------
                      math.gamma(alpha) * beta ** alpha
        """
        # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2
        # Warning: a few older sources define the gamma distribution in terms
        # of alpha > -1.0
        if alpha <= 0.0 or beta <= 0.0:
            raise ValueError('gammavariate: alpha and beta must be > 0.0')
        random = self.random
        if alpha > 1.0:
            # Uses R.C.H. Cheng, "The generation of Gamma
            # variables with non-integral shape parameters",
            # Applied Statistics, (1977), 26, No. 1, p71-74
            ainv = _sqrt(2.0 * alpha - 1.0)
            bbb = alpha - LOG4
            ccc = alpha + ainv
            while 1:
                u1 = random()
                if not 1e-7 < u1 < .9999999:
                    continue
                u2 = 1.0 - random()
                v = _log(u1/(1.0-u1))/ainv
                x = alpha*_exp(v)
                z = u1*u1*u2
                r = bbb+ccc*v-x
                if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
                    return x * beta
        elif alpha == 1.0:
            # expovariate(1)
            u = random()
            while u <= 1e-7:
                u = random()
            return -_log(u) * beta
        else:   # alpha is between 0 and 1 (exclusive)
            # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
            while 1:
                u = random()
                b = (_e + alpha)/_e
                p = b*u
                if p <= 1.0:
                    x = p ** (1.0/alpha)
                else:
                    x = -_log((b-p)/alpha)
                u1 = random()
                if p > 1.0:
                    if u1 <= x ** (alpha - 1.0):
                        break
                elif u1 <= _exp(-x):
                    break
            return x * beta
    ## -------------------- Gauss (faster alternative) --------------------
    def gauss(self, mu, sigma):
        """Gaussian distribution.
        mu is the mean, and sigma is the standard deviation.  This is
        slightly faster than the normalvariate() function.
        Not thread-safe without a lock around calls.
        """
        # When x and y are two variables from [0, 1), uniformly
        # distributed, then
        #
        #    cos(2*pi*x)*sqrt(-2*log(1-y))
        #    sin(2*pi*x)*sqrt(-2*log(1-y))
        #
        # are two *independent* variables with normal distribution
        # (mu = 0, sigma = 1).
        # (Lambert Meertens)
        # (corrected version; bug discovered by Mike Miller, fixed by LM)
        # Multithreading note: When two threads call this function
        # simultaneously, it is possible that they will receive the
        # same return value.  The window is very small though.  To
        # avoid this, you have to use a lock around all calls.  (I
        # didn't want to slow this down in the serial case by using a
        # lock here.)
        random = self.random
        z = self.gauss_next
        self.gauss_next = None
        if z is None:
            # Box-Muller transform yields two deviates; cache the second
            # one in gauss_next for the next call.
            x2pi = random() * TWOPI
            g2rad = _sqrt(-2.0 * _log(1.0 - random()))
            z = _cos(x2pi) * g2rad
            self.gauss_next = _sin(x2pi) * g2rad
        return mu + z*sigma
    ## -------------------- beta --------------------
    ## See
    ## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html
    ## for Ivan Frohne's insightful analysis of why the original implementation:
    ##
    ##    def betavariate(self, alpha, beta):
    ##        # Discrete Event Simulation in C, pp 87-88.
    ##
    ##        y = self.expovariate(alpha)
    ##        z = self.expovariate(1.0/beta)
    ##        return z/(y+z)
    ##
    ## was dead wrong, and how it probably got that way.
    def betavariate(self, alpha, beta):
        """Beta distribution.
        Conditions on the parameters are alpha > 0 and beta > 0.
        Returned values range between 0 and 1.
        """
        # This version due to Janne Sinkkonen, and matches all the std
        # texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").
        y = self.gammavariate(alpha, 1.)
        if y == 0:
            return 0.0
        else:
            return y / (y + self.gammavariate(beta, 1.))
    ## -------------------- Pareto --------------------
    def paretovariate(self, alpha):
        """Pareto distribution.  alpha is the shape parameter."""
        # Jain, pg. 495
        u = 1.0 - self.random()
        return 1.0 / u ** (1.0/alpha)
    ## -------------------- Weibull --------------------
    def weibullvariate(self, alpha, beta):
        """Weibull distribution.
        alpha is the scale parameter and beta is the shape parameter.
        """
        # Jain, pg. 499; bug fix courtesy Bill Arms
        u = 1.0 - self.random()
        return alpha * (-_log(u)) ** (1.0/beta)
## --------------- Operating System Random Source ------------------
class SystemRandom(Random):
    """Random number generator backed by operating-system entropy
    (e.g. /dev/urandom on Unix or CryptGenRandom on Windows).
    Not available on all systems (see os.urandom() for details).
    """
    def random(self):
        """Get the next random number in the range [0.0, 1.0)."""
        # 7 bytes give 56 random bits; shift off 3 so exactly BPF (53)
        # bits remain, then scale into [0, 1).
        raw = int.from_bytes(_urandom(7), 'big') >> 3
        return raw * RECIP_BPF
    def getrandbits(self, k):
        """getrandbits(k) -> x.  Generates an int with k random bits."""
        if k <= 0:
            raise ValueError('number of bits must be greater than zero')
        if k != int(k):
            raise TypeError('number of bits should be an integer')
        nbytes = (k + 7) // 8               # bits / 8, rounded up
        raw = int.from_bytes(_urandom(nbytes), 'big')
        # Discard the surplus low-order bits of the final byte.
        return raw >> (nbytes * 8 - k)
    def seed(self, *args, **kwds):
        "Stub method.  Not used for a system random number generator."
        return None
    def _notimplemented(self, *args, **kwds):
        "Method should not be called for a system random number generator."
        raise NotImplementedError('System entropy source does not have state.')
    getstate = setstate = _notimplemented
## -------------------- test program --------------------
def _test_generator(n, func, args):
import time
print(n, 'times', func.__name__)
total = 0.0
sqsum = 0.0
smallest = 1e10
largest = -1e10
t0 = time.time()
for i in range(n):
x = func(*args)
total += x
sqsum = sqsum + x*x
smallest = min(x, smallest)
largest = max(x, largest)
t1 = time.time()
print(round(t1-t0, 3), 'sec,', end=' ')
avg = total/n
stddev = _sqrt(sqsum/n - avg*avg)
print('avg %g, stddev %g, min %g, max %g' % \
(avg, stddev, smallest, largest))
def _test(N=2000):
    """Exercise every distribution generator N times, printing timing
    and summary statistics for each one."""
    cases = [
        (random, ()),
        (normalvariate, (0.0, 1.0)),
        (lognormvariate, (0.0, 1.0)),
        (vonmisesvariate, (0.0, 1.0)),
        (gammavariate, (0.01, 1.0)),
        (gammavariate, (0.1, 1.0)),
        (gammavariate, (0.1, 2.0)),
        (gammavariate, (0.5, 1.0)),
        (gammavariate, (0.9, 1.0)),
        (gammavariate, (1.0, 1.0)),
        (gammavariate, (2.0, 1.0)),
        (gammavariate, (20.0, 1.0)),
        (gammavariate, (200.0, 1.0)),
        (gauss, (0.0, 1.0)),
        (betavariate, (3.0, 3.0)),
        (triangular, (0.0, 1.0, 1.0/3.0)),
    ]
    for func, args in cases:
        _test_generator(N, func, args)
# Create one instance, seeded from current time, and export its methods
# as module-level functions.  The functions share state across all uses
# (both in the user's code and in the Python libraries), but that's fine
# for most programs and is easier for the casual user than making them
# instantiate their own Random() instance.
_inst = Random()
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
triangular = _inst.triangular
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
sample = _inst.sample
shuffle = _inst.shuffle
normalvariate = _inst.normalvariate
lognormvariate = _inst.lognormvariate
expovariate = _inst.expovariate
vonmisesvariate = _inst.vonmisesvariate
gammavariate = _inst.gammavariate
gauss = _inst.gauss
betavariate = _inst.betavariate
paretovariate = _inst.paretovariate
weibullvariate = _inst.weibullvariate
getstate = _inst.getstate
setstate = _inst.setstate
getrandbits = _inst.getrandbits
# Running the module directly exercises every generator once.
if __name__ == '__main__':
    _test()
raven47git/pyspider | pyspider/database/sqlite/sqlitebase.py | 70 | 1861 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<roy@binux.me>
# http://binux.me
# Created on 2014-11-22 20:30:44
import time
import sqlite3
import threading
class SQLiteMixin(object):
    """Provides a per-thread sqlite3 connection.

    sqlite3 connection objects may only be used from the thread that
    created them, so the cached connection is rebuilt whenever a
    different thread asks for a cursor.
    """
    @property
    def dbcur(self):
        # `self.path`, `self.conn` and `self.last_pid` are assumed to be
        # initialised by the subclass -- TODO confirm against callers.
        pid = threading.current_thread().ident
        if not (self.conn and pid == self.last_pid):
            self.last_pid = pid
            # isolation_level=None puts sqlite3 into autocommit mode.
            self.conn = sqlite3.connect(self.path, isolation_level=None)
        return self.conn.cursor()
class SplitTableMixin(object):
    """Maps each project onto its own SQLite table and caches the list of
    known projects, refreshing it at most every UPDATE_PROJECTS_TIME
    seconds.

    Relies on `self.__tablename__`, `self._select`, `self._execute` and
    `self.escape` being provided by the concrete subclass -- TODO confirm.
    """
    UPDATE_PROJECTS_TIME = 10 * 60  # seconds between project-list refreshes
    def _tablename(self, project):
        # Table name is "<prefix>_<project>", or just the project name when
        # no table-name prefix is configured.
        if self.__tablename__:
            return '%s_%s' % (self.__tablename__, project)
        else:
            return project
    @property
    def projects(self):
        # Lazily refresh the cached project set when it has gone stale.
        if time.time() - getattr(self, '_last_update_projects', 0) \
                > self.UPDATE_PROJECTS_TIME:
            self._list_project()
        return self._projects
    @projects.setter
    def projects(self, value):
        self._projects = value
    def _list_project(self):
        """Rebuild the project set from the table names in sqlite_master."""
        self._last_update_projects = time.time()
        self.projects = set()
        if self.__tablename__:
            prefix = '%s_' % self.__tablename__
        else:
            prefix = ''
        for project, in self._select('sqlite_master', what='name',
                                     where='type = "table"'):
            if project.startswith(prefix):
                project = project[len(prefix):]
                self.projects.add(project)
    def drop(self, project):
        """Drop the table backing `project`, if it exists."""
        if project not in self.projects:
            # Cache may be stale; refresh once before giving up.
            self._list_project()
        if project not in self.projects:
            return
        tablename = self._tablename(project)
        self._execute("DROP TABLE %s" % self.escape(tablename))
        self._list_project()
| apache-2.0 |
vLBrian/boxeehack-cigamit | hack/boxee/skin/boxee/720p/scripts/boxeehack_update.py | 2 | 4717 | import time
import os,sys
import xbmc, xbmcgui, mc
import subprocess
import common
import time
import urllib2
# Get the remote version number from github
# Get the remote version number from github
def get_remote_version():
    """Fetch the latest published BOXEE+HACKS version string from the
    update server.

    Adds a timeout so a dead server cannot hang the UI forever, and
    closes the response handle instead of leaking it.
    """
    u = urllib2.urlopen('http://dl.boxeed.in/version', timeout=30)
    try:
        version_remote = "%s" % u.read()
    finally:
        u.close()
    return version_remote
# Get the version number for the locally installed version
# Get the version number for the locally installed version
def get_local_version():
    """Return the BOXEE+HACKS version string stored on the device."""
    version_local = common.file_get_contents("/data/hack/version")
    return version_local
# Check for newer version
def _version_parts(version):
    """Parse a dotted version string like '1.2.10' into a tuple of ints.

    Non-numeric pieces become 0 so a malformed version file cannot crash
    the comparison.
    """
    parts = []
    for piece in version.strip().split("."):
        try:
            parts.append(int(piece))
        except ValueError:
            parts.append(0)
    return tuple(parts)
def check_new_version():
    """Compare the installed version against the published one and prompt
    the user to upgrade when a newer release is available.
    """
    version_remote = get_remote_version()
    version_local = get_local_version()
    # Compare numerically: the previous string comparison ranked "10"
    # below "9", so upgrades past single-digit components were missed.
    # Tuple comparison also handles versions with fewer than 3 parts,
    # which previously raised IndexError.
    version_remote_parts = _version_parts(version_remote)
    version_local_parts = _version_parts(version_local)
    hasnew = version_remote_parts > version_local_parts
    issame = version_remote_parts == version_local_parts
    dialog = xbmcgui.Dialog()
    if hasnew:
        if dialog.yesno("BOXEE+HACKS Version", "A new version of BOXEE+ is available. Upgrade to %s now?" % (version_remote)):
            update()
    elif issame:
        dialog.ok("BOXEE+HACKS Version", "Your BOXEE+ version is up to date.")
    else:
        # Local version is newer than the published one.
        dialog.ok("BOXEE+HACKS Version", "Hi there Doc Brown. How's the future?")
def update():
    """Download (or copy from USB) and install the latest BOXEE+HACKS
    build, then reboot the box.

    Progress is reported through the on-screen notification API; the
    `dtool` calls appear to drive the device's status LEDs -- TODO
    confirm the dtool argument semantics.
    """
    version_remote = get_remote_version()
    os.system("dtool 6 1 0 100")
    os.system("dtool 6 2 0 0")
    mc.ShowDialogNotification("Beginning Upgrade")
    if os.path.exists("/media/BOXEE/hack"):
        # Offline install path: a USB drive with a prepared /hack tree.
        mc.ShowDialogNotification("Found USB Drive with Boxee+")
        # NOTE(review): 'ev' is read here but never verified against
        # anything in this branch.
        ev = common.file_get_contents("/media/BOXEE/hack/version")
        xbmc.executebuiltin("Notification(,Installing Boxee+,60000)")
        mc.ShowDialogWait()
        os.system("rm -Rf /data/hack")
        os.system("cp -R /media/BOXEE/hack /data/")
        os.system("chmod -R +x /data/hack/*.sh")
        os.system("chmod -R +x /data/hack/bin/*")
        mc.HideDialogWait()
    else:
        # Online install path: download, checksum, extract, verify, install.
        # Clean Up to Ensure we have Disk Space
        cleanupdownload()
        xbmc.executebuiltin("Notification(,Downloading Boxee+,120000)")
        mc.ShowDialogWait()
        os.system("/opt/local/bin/curl -L http://dl.boxeed.in/boxeehack.zip -o /download/boxeehack.zip")
        os.system("/opt/local/bin/curl -L http://dl.boxeed.in/boxeehack.md5 -o /download/boxeehack.md5")
        dm = common.file_get_contents("/download/boxeehack.md5")
        os.system("md5sum /download/boxeehack.zip | awk '{ print $1 }'> /download/boxeehack.md52")
        # NOTE(review): this reads the expected .md5 file again rather than
        # the computed .md52 file -- presumably the comparison should use
        # boxeehack.md52; confirm before relying on the checksum guard.
        tm = common.file_get_contents("/download/boxeehack.md5")
        mc.HideDialogWait()
        if dm != tm or tm == "":
            # Checksum mismatch: reset LEDs and abort without touching /data.
            os.system("dtool 6 1 0 0")
            os.system("dtool 6 2 0 50")
            xbmc.executebuiltin("Notification(,Download Failed - Aborting,60000)")
            return
        mc.ShowDialogNotification("Download Complete")
        time.sleep(2)
        xbmc.executebuiltin("Notification(,Extracting Archive,120000)")
        mc.ShowDialogWait()
        os.system("/bin/busybox unzip /download/boxeehack.zip -d /download/")
        mc.HideDialogWait()
        mc.ShowDialogNotification("Extraction Complete")
        time.sleep(2)
        mc.ShowDialogNotification("Verifying Extraction")
        # The extracted tree must report exactly the advertised version.
        ev = common.file_get_contents("/download/boxeehack-master/hack/version")
        if ev != version_remote:
            os.system("dtool 6 1 0 0")
            os.system("dtool 6 2 0 50")
            xbmc.executebuiltin("Notification(,Extraction Failed - Aborting,60000)")
            return
        time.sleep(2)
        xbmc.executebuiltin("Notification(,Installing Boxee+,60000)")
        mc.ShowDialogWait()
        os.system("rm -Rf /data/hack")
        os.system("cp -R /download/boxeehack-master/hack /data/")
        os.system("chmod 777 /data/hack/*.sh")
        os.system("chmod 777 /data/hack/bin/*")
        mc.HideDialogWait()
        mc.ShowDialogNotification("Verifying Installation")
        # Installed version must match what was extracted.
        hv = common.file_get_contents("/data/hack/version")
        if ev != hv:
            os.system("dtool 6 1 0 0")
            os.system("dtool 6 2 0 50")
            xbmc.executebuiltin("Notification(,Installation Failed - Aborting,60000)")
            return
        time.sleep(2)
        mc.ShowDialogNotification("Cleaning Up")
        cleanupdownload()
        time.sleep(2)
    os.system("dtool 6 1 0 0")
    os.system("dtool 6 2 0 50")
    # No need to redo all the settings since Boxee+ is already running
    xbmc.executebuiltin("Notification(,Rebooting,120000)")
    os.system("reboot")
def cleanupdownload():
    """Remove the downloaded archive, its checksum files and the
    extracted tree from the download area."""
    leftovers = (
        "/download/boxeehack-master",
        "/download/boxeehack.zip",
        "/download/boxeehack.md5",
        "/download/boxeehack.md52",
    )
    for path in leftovers:
        os.system("rm -fr %s" % path)
if (__name__ == "__main__"):
    # Invoked from the Boxee skin: the first CLI argument selects the action.
    command = sys.argv[1]
    if command == "version": check_new_version()
| mit |
deKupini/erp | addons/pos_loyalty/__openerp__.py | 9 | 1607 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    # Addon manifest consumed by the OpenERP/Odoo module loader.
    'name': 'Loyalty Program',
    'version': '1.0',
    'category': 'Point of Sale',
    'sequence': 6,
    'summary': 'Loyalty Program for the Point of Sale ',
    'description': """
=======================
This module allows you to define a loyalty program in
the point of sale, where the customers earn loyalty points
and get rewards.
""",
    'author': 'OpenERP SA',
    # Extends the base Point of Sale addon.
    'depends': ['point_of_sale'],
    # Views, access rules and QWeb assets loaded at install time.
    'data': [
        'views/views.xml',
        'security/ir.model.access.csv',
        'views/templates.xml'
    ],
    'qweb': ['static/src/xml/loyalty.xml'],
    'installable': True,
    'auto_install': False,
}
| agpl-3.0 |
houghb/HDSAviz | savvy/network_tools.py | 2 | 9496 | """
This module contains functions to create and display network graphs of the
sensitivity analysis results. It is included as an independent module in
this package because graph-tools is an uncommon package that is slightly
more involved to install than normal conda- or pip-accessible packages. All
the other visualization functionality of savvy is accessible with the more
readily available bokeh plots.
The plots generated in this module offer a good visualization of which
parameters have the highest sensitivities, and which are connected by
second order interactions. Relative sizes of vertices on these plots are not
very good representations of the actual difference in magnitude between
sensitivities (a value of 0.02 appears similar to a value of 0.2). The bokeh
visualizations offer better insight into these relative magnitudes.
"""
# graph-tool is an optional dependency; warn instead of crashing when it
# is missing so the rest of savvy stays importable.
try:
    from graph_tool import Graph, draw, community
except ImportError:
    # Fix: the implicit string concatenation was missing a space and
    # printed "...independentof graph-tool".
    print ('----\ngraph-tool package is not installed - please install it to '
           'use network_tools!\nOther modules in savvy are independent '
           'of graph-tool.')
def build_graph(df_list, sens='ST', top=410, min_sens=0.01,
                edge_cutoff=0.0):
    """
    Build a graph of sensitivity analysis results.

    Vertices are the parameters selected from the first dataframe in
    'df_list', subject to the constraints set by 'sens', 'top', and
    'min_sens'.  Edges are the second order sensitivities of the
    interactions between those vertices, with sensitivities greater than
    'edge_cutoff'.

    Parameters
    -----------
    df_list     : list
                  A list of two dataframes.  The first dataframe should be
                  the first/total order sensitivities collected by the
                  function data_processing.get_sa_data(); the second holds
                  the second order sensitivities.
    sens        : str, optional
                  A string with the name of the sensitivity that you would
                  like to use for the vertices ('ST' or 'S1').
    top         : int, optional
                  An integer specifying the number of vertices to display (
                  the top sensitivity values).
    min_sens    : float, optional
                  A float with the minimum sensitivity to allow in the graph.
    edge_cutoff : float, optional
                  A float specifying the minimum second order sensitivity to
                  show as an edge in the graph.

    Returns
    --------
    g : graph-tool object
        a graph-tool graph object of the network described above.  Each
        vertex has properties 'param', 'sensitivity', and 'confidence'
        corresponding to the name of the parameter, value of the sensitivity
        index, and it's confidence interval.  The only edge property is
        'second_sens', the second order sensitivity index for the
        interaction between the two vertices it connects.

    Raises
    -------
    ValueError
        If `sens` is not 'ST'/'S1', or if the second order dataframe is
        missing from `df_list`.
    """
    # get the first/total index dataframe and second order dataframe
    df = df_list[0]
    df2 = df_list[1]

    # Make sure sens is ST or S1
    if sens not in ('ST', 'S1'):
        raise ValueError('sens must be ST or S1')

    # Make sure that there is a second order index dataframe.  NOTE: the
    # previous implementation wrapped this check in a bare try/except that
    # silently swallowed the error (truth-testing a DataFrame raises), so a
    # missing df2 crashed later with a confusing TypeError.  Fail fast here.
    if df2 is None:
        raise ValueError('Missing second order dataframe!')

    # slice the dataframes so the resulting graph will only include the top
    # 'top' values of 'sens' greater than 'min_sens'.
    df = df.sort_values(sens, ascending=False)
    # .loc replaces the deprecated/removed .ix indexer
    df = df.loc[df[sens] > min_sens, :].head(top)
    df = df.reset_index()

    # initialize a graph and its vertex/edge property maps
    g = Graph()
    vprop_sens = g.new_vertex_property('double')
    vprop_conf = g.new_vertex_property('double')
    vprop_name = g.new_vertex_property('string')
    eprop_sens = g.new_edge_property('double')
    g.vertex_properties['param'] = vprop_name
    g.vertex_properties['sensitivity'] = vprop_sens
    g.vertex_properties['confidence'] = vprop_conf
    g.edge_properties['second_sens'] = eprop_sens

    # keep a list of all the vertices
    v_list = []
    # Add the vertices to the graph
    for i, param in enumerate(df['Parameter']):
        v = g.add_vertex()
        vprop_sens[v] = df.loc[i, sens]
        # halo size: 1 + relative confidence interval
        vprop_conf[v] = 1 + df.loc[i, '%s_conf' % sens] / df.loc[i, sens]
        vprop_name[v] = param
        v_list.append(v)

    # Make two new columns in second order dataframe that point to the
    # vertices connected on each row (-999 marks "not a selected vertex").
    df2['vertex1'] = -999
    df2['vertex2'] = -999
    for vertex in v_list:
        param = g.vp.param[vertex]
        df2.loc[df2['Parameter_1'] == param, 'vertex1'] = vertex
        df2.loc[df2['Parameter_2'] == param, 'vertex2'] = vertex

    # Only allow edges for vertices that we've defined
    df_edges = df2[(df2['vertex1'] != -999) & (df2['vertex2'] != -999)]
    # eliminate edges below a certain cutoff value
    pruned = df_edges[df_edges['S2'] > edge_cutoff].reset_index()

    # Add the edges for the graph
    for i, sensitivity in enumerate(pruned['S2']):
        v1 = pruned.loc[i, 'vertex1']
        v2 = pruned.loc[i, 'vertex2']
        e = g.add_edge(v1, v2)
        # multiply by a number to make the lines visible on the plot
        eprop_sens[e] = sensitivity * 150

    # These are ways you can reference properties of vertices or edges
    # g.vp.param[g.vertex(77)]
    # g.vp.param[v_list[0]]
    print ('Created a graph with %s vertices and %s edges.\nVertices are the '
           'top %s %s values greater than %s.\nOnly S2 values (edges) '
           'greater than %s are included.' %
           (g.num_vertices(), g.num_edges(), top, sens, min_sens, edge_cutoff))
    return g
def plot_network_random(g, inline=True, filename=None, scale=300.0):
    """
    Display a plot of the network, g, with the vertices placed in an
    unstructured, apparently random layout.  Vertices are the model
    parameters and they are connected by edges whose thickness indicates the
    value of the second order sensitivity.

    Parameters
    -----------
    g        : graph-tool graph
               The graph to plot.
    inline   : bool, optional
               Boolean indicating whether the plot should be shown inline in
               an ipython notebook.  If false the plot is created in its own
               window and is somewhat interactive.
    filename : str, optional
               If you would like to save the plot to a file specify a
               filename (with an extension of pdf or png).
    scale    : float, optional
               If you would like to resize the vertices you can change the
               value of this float.

    Returns
    --------
    graph-tool plot
    """
    # Build a separate, scaled size property instead of mutating the graph's
    # 'sensitivity' values in place.  The previous in-place scaling
    # compounded on every call, so plotting the same graph twice blew up
    # the vertex sizes.
    vsize = g.new_vertex_property('double')
    for v in g.vertices():
        vsize[v] = scale * g.vp['sensitivity'][v]
    draw.graph_draw(g,
                    vertex_text=g.vp['param'],
                    vertex_font_size=10,
                    vertex_text_position=-0.1,
                    # vertex_text_color='black',
                    vertex_size=vsize,
                    vertex_color='#006600',
                    vertex_fill_color='#006600',
                    vertex_halo=True,
                    vertex_halo_color='#b3c6ff',
                    vertex_halo_size=g.vp['confidence'],
                    edge_color='#002699',
                    edge_pen_width=g.ep['second_sens'],
                    output_size=(600, 600),
                    inline=inline,
                    output=filename
                    )
def plot_network_circle(g, inline=True, filename=None, scale=300.0):
    """
    Display a plot of the network, g, with the vertices placed around the
    edge of a circle.  Vertices are the model parameters and they are
    connected by edges whose thickness indicates the value of the second
    order sensitivity.

    Parameters
    -----------
    g        : graph-tool graph
               The graph to plot.
    inline   : bool, optional
               Boolean indicating whether the plot should be shown inline in
               an ipython notebook.  If false the plot is created in its own
               window and is somewhat interactive.
    filename : str, optional
               If you would like to save the plot to a file specify a
               filename (with an extension of pdf or png).
    scale    : float, optional
               If you would like to resize the vertices you can change the
               value of this float.

    Returns
    --------
    graph-tool plot
    """
    # Use a scaled copy of the sensitivity property rather than mutating g
    # in place (the old code multiplied the stored values on every call, so
    # repeated plots of the same graph compounded the scaling).
    vsize = g.new_vertex_property('double')
    for v in g.vertices():
        vsize[v] = scale * g.vp['sensitivity'][v]
    # minimize_nested_blockmodel_dl finds a community hierarchy used by
    # draw_hierarchy to place vertices around a circle.
    state = community.minimize_nested_blockmodel_dl(g, deg_corr=True)
    draw.draw_hierarchy(state,
                        vertex_text=g.vp['param'],
                        vertex_text_position=-0.1,
                        # vertex_text_color='black',
                        vertex_font_size=10,
                        vertex_size=vsize,
                        vertex_color='#006600',
                        vertex_fill_color='#006600',
                        vertex_halo=True,
                        vertex_halo_color='#b3c6ff',
                        vertex_halo_size=g.vp['confidence'],
                        edge_pen_width=g.ep['second_sens'],
                        # subsample_edges=100,
                        output_size=(600, 600),
                        inline=inline,
                        output=filename
                        )
| bsd-2-clause |
soltanmm-google/grpc | test/core/bad_client/gen_build_yaml.py | 26 | 3674 | #!/usr/bin/env python2.7
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Generates the appropriate build.json data for all the bad_client tests."""
import collections
import yaml
TestOptions = collections.namedtuple('TestOptions', 'flaky cpu_cost')
default_test_options = TestOptions(False, 1.0)
# maps test names to options
BAD_CLIENT_TESTS = {
'badreq': default_test_options,
'connection_prefix': default_test_options._replace(cpu_cost=0.2),
'headers': default_test_options._replace(cpu_cost=0.2),
'initial_settings_frame': default_test_options._replace(cpu_cost=0.2),
'head_of_line_blocking': default_test_options,
'large_metadata': default_test_options,
'server_registered_method': default_test_options,
'simple_request': default_test_options,
'window_overflow': default_test_options,
'unknown_frame': default_test_options,
}
def main():
    """Build the bad_client build.yaml structure and dump it to stdout."""
    # Renamed from 'json' to avoid shadowing the stdlib json module name.
    build_data = {
        '#': 'generated with test/bad_client/gen_build_json.py',
        'libs': [
            {
                'name': 'bad_client_test',
                'build': 'private',
                'language': 'c',
                'src': [
                    'test/core/bad_client/bad_client.c'
                ],
                'headers': [
                    'test/core/bad_client/bad_client.h'
                ],
                'vs_proj_dir': 'test/bad_client',
                'deps': [
                    'grpc_test_util_unsecure',
                    'grpc_unsecure',
                    'gpr_test_util',
                    'gpr'
                ]
            }],
        'targets': [
            {
                'name': '%s_bad_client_test' % t,
                'cpu_cost': BAD_CLIENT_TESTS[t].cpu_cost,
                'build': 'test',
                'language': 'c',
                'secure': 'no',
                'src': ['test/core/bad_client/tests/%s.c' % t],
                'vs_proj_dir': 'test',
                'exclude_iomgrs': ['uv'],
                'deps': [
                    'bad_client_test',
                    'grpc_test_util_unsecure',
                    'grpc_unsecure',
                    'gpr_test_util',
                    'gpr'
                ]
            }
            for t in sorted(BAD_CLIENT_TESTS.keys())]}
    # print() call form works under both Python 2 and Python 3 (the old
    # 'print yaml.dump(...)' statement was Python-2-only syntax).
    print(yaml.dump(build_data))
if __name__ == '__main__':
    main()
| bsd-3-clause |
flyngPig/APM_simulink | ardupilot/mk/PX4/Tools/genmsg/test/test_genmsg_msg_loader.py | 215 | 29225 | # Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import random
def get_test_dir():
    """Return the absolute path of the 'files' directory beside this test."""
    here = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(here, 'files'))
def test_exceptions():
    """MsgNotFound must be raisable and catchable like a normal exception."""
    from genmsg import MsgNotFound
    try:
        raise MsgNotFound('hello')
    except MsgNotFound:
        pass
def test__convert_constant_value():
    """Check constant conversion for every primitive type, plus the
    width-overflow, wrong-literal, and unknown-type failure modes."""
    from genmsg.msg_loader import convert_constant_value
    from genmsg import InvalidMsgSpec

    # (type, literal, expected python value) triples covering all primitives
    ok_cases = [
        ('float32', '0.0', 0.),
        ('float64', '0.0', 0.),
        ('string', ' fo o ', 'fo o'),
        ('byte', '1', 1),
        ('char', '1', 1),
        ('int8', '1', 1),
        ('int16', '12', 12),
        ('int32', '-13', -13),
        ('int64', '14', 14),
        ('uint8', '0', 0),
        ('uint16', '18', 18),
        ('uint32', '19', 19),
        ('uint64', '20', 20),
        ('bool', '1', True),
        ('bool', '0', False),
    ]
    for t, v, expected in ok_cases:
        assert expected == convert_constant_value(t, v)

    # out-of-range literals must be rejected with InvalidMsgSpec
    width_fail = [('int8', '129'), ('uint8', '256'),
                  ('int16', '35536'), ('uint16', '-1'), ('uint16', '65536'),
                  ('int32', '3000000000'), ('int32', '-2700000000'),
                  ('uint32', '-1'), ('uint32', '41000000000'),
                  ('uint64', '-1')]
    for t, v in width_fail:
        try:
            convert_constant_value(t, v)
            assert False, "should have failed width check: %s, %s"%(t, v)
        except InvalidMsgSpec:
            pass

    # literals of the wrong lexical type raise ValueError
    type_fail = [('int32', 'f'), ('float32', 'baz')]
    for t, v in type_fail:
        try:
            convert_constant_value(t, v)
            assert False, "should have failed type check: %s, %s"%(t, v)
        except ValueError:
            pass

    # an unknown type name is an InvalidMsgSpec
    try:
        convert_constant_value('foo', '1')
        assert False, "should have failed invalid type"
    except InvalidMsgSpec:
        pass
def test__load_constant_line():
    """Parse constant declarations: malformed lines raise, valid lines
    produce the expected Constant instances."""
    from genmsg.msgs import Constant, InvalidMsgSpec
    from genmsg.msg_loader import _load_constant_line

    # malformed declarations must all raise InvalidMsgSpec
    for bad_line in ("int8 field=alpha", "int8 field=", "faketype field=1"):
        try:
            _load_constant_line(bad_line)
            assert False, "should have raised"
        except InvalidMsgSpec:
            pass

    # valid integer constant
    assert _load_constant_line("int8 field=1") == Constant('int8', 'field', 1, '1')
    # string constants keep everything after '=' (including '#')
    assert _load_constant_line("string val=hello #world") == \
        Constant('string', 'val', 'hello #world', 'hello #world')
def test__load_field_line():
    """Parse field declarations: invalid names raise, valid lines resolve
    package-relative types, and 'Header' maps to std_msgs/Header."""
    from genmsg.msgs import InvalidMsgSpec, Field
    from genmsg.msg_loader import _load_field_line, InvalidMsgSpec, Field, is_valid_msg_field_name

    # missing or invalid field names must be rejected
    for bad_decl in ("string", "string data!", "string[ data"):
        try:
            _load_field_line(bad_decl, 'foo')
            assert False, "should have raised"
        except InvalidMsgSpec:
            pass
    assert not is_valid_msg_field_name('string[')

    # builtin type, with or without a trailing comment
    assert _load_field_line("string str", 'foo') == ('string', 'str')
    assert _load_field_line("string str #nonsense", 'foo') == ('string', 'str')
    # non-builtin types stay bare without a package, get qualified with one
    assert _load_field_line("String str #nonsense", '') == ('String', 'str')
    assert _load_field_line("String str #nonsense", 'foo') == ('foo/String', 'str')
    # make sure Header is mapped regardless of the declaring package
    assert _load_field_line("Header header #nonsense", 'somewhere') == \
        ('std_msgs/Header', 'header')
    assert _load_field_line("Header header #nonsense", '') == \
        ('std_msgs/Header', 'header')
def test_load_msg_from_string():
    """Load specs from raw text: Header mapping, constants, and the
    package/short/full name bookkeeping."""
    # make sure Header -> std_msgs/Header conversion works
    from genmsg.msgs import Constant
    from genmsg.msg_loader import load_msg_from_string, MsgContext
    context = MsgContext.create_default()
    msgspec = load_msg_from_string(context, "Header header", 'test_pkg/HeaderTest')
    print(msgspec)
    assert msgspec.has_header()
    assert msgspec.types == ['std_msgs/Header']
    assert msgspec.names == ['header']
    assert msgspec.constants == []
    assert msgspec.short_name == 'HeaderTest'
    assert msgspec.package == 'test_pkg'
    assert msgspec.full_name == 'test_pkg/HeaderTest'
    # constants and fields can be mixed; header still detected
    msgspec = load_msg_from_string(context, "int8 c=1\nHeader header\nint64 data", 'test_pkg/HeaderValsTest')
    assert msgspec.has_header()
    assert msgspec.types == ['std_msgs/Header', 'int64']
    assert msgspec.names == ['header', 'data']
    assert msgspec.constants == [Constant('int8', 'c', 1, '1')]
    assert msgspec.short_name == 'HeaderValsTest'
    assert msgspec.package == 'test_pkg'
    assert msgspec.full_name == 'test_pkg/HeaderValsTest'
    # a spec without any Header field reports has_header() == False
    msgspec = load_msg_from_string(context, "string data\nint64 data2", 'test_pkg/ValsTest')
    assert not msgspec.has_header()
    assert msgspec.types == ['string', 'int64']
    assert msgspec.names == ['data', 'data2']
    assert msgspec.constants == []
    assert msgspec.short_name == 'ValsTest'
    assert msgspec.full_name == 'test_pkg/ValsTest'
def _validate_TestString(msgspec):
    """Assert that a parsed TestString spec has the expected field layout."""
    assert msgspec.names == ['caller_id', 'orig_caller_id', 'data'], msgspec.names
    assert msgspec.types == ['string', 'string', 'string'], msgspec.types
def test_load_msg_from_file():
    """Load a spec from a .msg file, verify repeat loads are equal, bad
    files raise, and the context registers the loaded type."""
    from genmsg.msgs import InvalidMsgSpec
    from genmsg.msg_loader import load_msg_from_file, MsgContext
    test_d = get_test_dir()
    test_ros_dir = os.path.join(test_d, 'test_ros', 'msg')
    test_string_path = os.path.join(test_ros_dir, 'TestString.msg')
    msg_context = MsgContext.create_default()
    spec = load_msg_from_file(msg_context, test_string_path, 'test_ros/TestString')
    assert spec.full_name == 'test_ros/TestString'
    assert spec.package == 'test_ros'
    assert spec.short_name == 'TestString'
    _validate_TestString(spec)
    # test repeat: loading the same file again yields an equal spec
    spec_2 = load_msg_from_file(msg_context, test_string_path, 'test_ros/TestString')
    assert spec == spec_2
    assert spec.package == spec_2.package
    assert spec.short_name == spec_2.short_name
    # test w/ bad file
    test_bad_path = os.path.join(test_ros_dir, 'Bad.msg')
    try:
        load_msg_from_file(msg_context, test_bad_path, 'test_ros/Bad')
        assert False, "should have raised"
    except InvalidMsgSpec:
        pass
    # supposed to register
    assert msg_context.is_registered('test_ros/TestString'), msg_context
def test_load_msg_from_string_TestString():
    """Round-trip TestString.msg through load_msg_from_string and check
    that the type gets registered in the context."""
    from genmsg.msg_loader import load_msg_from_string, MsgContext
    msg_path = os.path.join(get_test_dir(), 'test_ros', 'msg', 'TestString.msg')
    with open(msg_path) as f:
        text = f.read()
    context = MsgContext.create_default()
    _validate_TestString(load_msg_from_string(context, text, 'test_ros/TestString'))
    # loading is supposed to register the type in the context
    assert context.is_registered('test_ros/TestString'), context
def test_load_msg_by_type():
    """Load messages by ROS type name via a package search path, including
    error cases and a sweep over every geometry_msgs fixture."""
    from genmsg.msg_loader import load_msg_by_type, MsgContext, MsgNotFound
    test_d = get_test_dir()
    geometry_d = os.path.join(test_d, 'geometry_msgs', 'msg')
    test_ros_dir = os.path.join(test_d, 'test_ros', 'msg')
    test_string_path = os.path.join(test_ros_dir, 'TestString.msg')
    search_path = {
        'test_ros': [ test_ros_dir ],
        'geometry_msgs': [ geometry_d ],
    }
    msg_context = MsgContext.create_default()
    msgspec = load_msg_by_type(msg_context, 'test_ros/TestString', search_path)
    _validate_TestString(msgspec)
    # supposed to register
    assert msg_context.is_registered('test_ros/TestString'), msg_context
    # test invalid search path (a list instead of the required dict)
    try:
        load_msg_by_type(msg_context, 'test_ros/TestString', [test_string_path])
        assert False, "should have raised"
    except ValueError:
        pass
    # test not found
    try:
        load_msg_by_type(msg_context, 'test_ros/Fake', search_path)
        assert False, "should have raised"
    except MsgNotFound:
        pass
    # test all the known geometry msgs
    test_d = get_test_dir()
    for f in os.listdir(geometry_d):
        if f.endswith('.msg'):
            short = f[:-4]
            msg_type = 'geometry_msgs/%s'%short
            spec = load_msg_by_type(msg_context, msg_type, search_path)
            assert spec is not None
            assert spec.package == 'geometry_msgs'
            assert spec.full_name == msg_type
            assert spec.short_name == short
            # spec.text must be the verbatim file contents
            with open(os.path.join(geometry_d, f)) as file_h:
                assert spec.text == file_h.read()
            # all types with 'Stamped' in name have headers
            if 'Stamped' in f:
                assert spec.has_header(), msg_type
def test_get_msg_file():
    """get_msg_file resolves known messages and rejects bad lookups."""
    from genmsg import MsgNotFound
    from genmsg.msg_loader import get_msg_file
    ros_msg_dir = os.path.join(get_test_dir(), 'test_ros', 'msg')
    string_path = os.path.join(ros_msg_dir, 'TestString.msg')
    search_path = {
        'test_ros': [ros_msg_dir],
    }
    # the known message resolves to its on-disk path
    assert get_msg_file('test_ros', 'TestString', search_path) == string_path
    # unknown message / unknown package both raise MsgNotFound
    for pkg, base_type in (('test_ros', 'DNE'), ('bad_pkg', 'TestString')):
        try:
            get_msg_file(pkg, base_type, search_path)
            assert False, "should have raised"
        except MsgNotFound:
            pass
    # a list is not a valid search path (must be a dict)
    try:
        get_msg_file('test_ros', 'TestString', [string_path])
        assert False, "should have raised"
    except ValueError:
        pass
def test_get_srv_file():
    """get_srv_file resolves known services and rejects bad lookups."""
    from genmsg import MsgNotFound
    from genmsg.msg_loader import get_srv_file
    base = get_test_dir()
    ros_srv_dir = os.path.join(base, 'test_ros', 'srv')
    std_srv_dir = os.path.join(base, 'std_srvs', 'srv')
    search_path = {
        'test_ros': [ros_srv_dir],
        'std_srvs': [std_srv_dir],
    }
    # the known service resolves to its on-disk path
    assert get_srv_file('std_srvs', 'Empty', search_path) == \
        os.path.join(std_srv_dir, 'Empty.srv')
    # unknown service / unknown package both raise MsgNotFound
    for pkg, base_type in (('test_ros', 'DNE'), ('bad_pkg', 'TestString')):
        try:
            get_srv_file(pkg, base_type, search_path)
            assert False, "should have raised"
        except MsgNotFound:
            pass
    # a list is not a valid search path (must be a dict)
    try:
        get_srv_file('std_srvs', 'Empty', [std_srv_dir])
        assert False, "should have raised"
    except ValueError:
        pass
def test_MsgContext():
    """Exercise MsgContext registration bookkeeping: defaults, explicit
    register/get_registered, and Header handling."""
    from genmsg.msg_loader import MsgContext, load_msg_from_file
    # a bare context has nothing registered, not even time/duration
    msg_context = MsgContext()
    assert not msg_context.is_registered('time')
    assert not msg_context.is_registered('duration')
    # the default context pre-registers the builtin time/duration types
    msg_context = MsgContext.create_default()
    # tripwires
    repr(msg_context)
    str(msg_context)
    assert msg_context.is_registered('time'), msg_context._registered_packages
    assert msg_context.is_registered('duration')
    assert not msg_context.is_registered('test_ros/TestString')
    assert not msg_context.is_registered('Header')
    # start loading stuff into context
    test_d = get_test_dir()
    test_ros_dir = os.path.join(test_d, 'test_ros', 'msg')
    test_string_path = os.path.join(test_ros_dir, 'TestString.msg')
    spec = load_msg_from_file(msg_context, test_string_path, 'test_ros/TestString')
    msg_context.register('test_ros/TestString', spec)
    assert msg_context.get_registered('test_ros/TestString') == spec
    # lookups of unregistered types raise KeyError
    try:
        msg_context.get_registered('bad/TestString')
        assert False, 'should have raised'
    except KeyError:
        pass
    assert msg_context.is_registered('test_ros/TestString')
    # test Header: the short name is never auto-registered
    assert not msg_context.is_registered('Header')
    assert not msg_context.is_registered('std_msgs/Header')
    msg_context.register('std_msgs/Header', spec)
    assert msg_context.is_registered('std_msgs/Header')
def test_load_srv_from_file():
    """Load AddTwoInts.srv and verify the request spec, raw text, and
    that repeat loads produce an equal spec."""
    from genmsg.msg_loader import MsgContext, load_srv_from_file
    context = MsgContext.create_default()
    srv_path = os.path.join(get_test_dir(), 'test_ros', 'srv', 'AddTwoInts.srv')
    with open(srv_path, 'r') as f:
        raw_text = f.read()
    full_name = 'test_ros/AddTwoInts'
    spec = load_srv_from_file(context, srv_path, full_name)
    # loading the same file twice must yield an equal spec
    assert load_srv_from_file(context, srv_path, full_name) == spec
    assert spec.request.types == ['int64', 'int64'], spec.request.types
    assert spec.request.names == ['a', 'b']
    assert spec.text == raw_text
    assert spec.full_name == full_name
def test_load_msg_depends():
    """load_msg_depends resolves direct and transitive message
    dependencies, and raises MsgNotFound for broken specs."""
    #TODO: should there just be a 'load_msg, implicit=True?'
    from genmsg.msg_loader import MsgContext, load_msg_by_type, load_msg_depends, MsgNotFound
    test_d = get_test_dir()
    search_path = {
        'test_ros': [ os.path.join(test_d, 'test_ros', 'msg') ],
        'std_msgs': [ os.path.join(test_d, 'std_msgs', 'msg') ],
        'geometry_msgs': [ os.path.join(test_d, 'geometry_msgs', 'msg') ],
        'sensor_msgs': [ os.path.join(test_d, 'sensor_msgs', 'msg') ],
        'invalid': [ os.path.join(test_d, 'invalid', 'msg') ],
    }
    # Test not found: specs referencing missing types must raise
    msg_context = MsgContext.create_default()
    root_spec = load_msg_by_type(msg_context, 'invalid/BadDepend', search_path)
    try:
        load_msg_depends(msg_context, root_spec, search_path)
        assert False, "should have raised MsgNotFound"
    except MsgNotFound:
        pass
    root_spec = load_msg_by_type(msg_context, 'invalid/BadLocalDepend', search_path)
    try:
        load_msg_depends(msg_context, root_spec, search_path)
        assert False, "should have raised MsgNotFound"
    except MsgNotFound:
        pass
    # a leaf message has no dependencies
    msg_context = MsgContext.create_default()
    root_spec = load_msg_by_type(msg_context, 'std_msgs/Int32', search_path)
    load_msg_depends(msg_context, root_spec, search_path)
    file_p = os.path.join(test_d, 'std_msgs', 'msg', 'Int32.msg')
    assert file_p == msg_context.get_file('std_msgs/Int32')
    assert [] == msg_context.get_depends('std_msgs/Int32')
    # Header itself has no further dependencies
    msg_context = MsgContext.create_default()
    root_spec = load_msg_by_type(msg_context, 'std_msgs/Header', search_path)
    load_msg_depends(msg_context, root_spec, search_path)
    file_p = os.path.join(test_d, 'std_msgs', 'msg', 'Header.msg')
    assert file_p == msg_context.get_file('std_msgs/Header')
    assert [] == msg_context.get_depends('std_msgs/Header')
    # bare 'Header' resolves to std_msgs/Header
    msg_context = MsgContext.create_default()
    root_spec = load_msg_by_type(msg_context, 'Header', search_path)
    load_msg_depends(msg_context, root_spec, search_path)
    file_p = os.path.join(test_d, 'std_msgs', 'msg', 'Header.msg')
    assert file_p == msg_context.get_file('std_msgs/Header')
    assert [] == msg_context.get_depends('std_msgs/Header')
    # a nested message: direct vs. transitive (get_all_depends) dependencies
    msg_context = MsgContext.create_default()
    root_spec = load_msg_by_type(msg_context, 'std_msgs/Int32MultiArray', search_path)
    load_msg_depends(msg_context, root_spec, search_path)
    file_p = os.path.join(test_d, 'std_msgs', 'msg', 'Int32MultiArray.msg')
    assert file_p == msg_context.get_file('std_msgs/Int32MultiArray')
    val = msg_context.get_all_depends('std_msgs/Int32MultiArray')
    assert set(['std_msgs/MultiArrayLayout', 'std_msgs/MultiArrayDimension']) == set(val), val
    assert 2 == len(val), val
    val = msg_context.get_depends('std_msgs/Int32MultiArray')
    assert set(['std_msgs/MultiArrayLayout']) == set(val), val
    for s in ['MultiArrayLayout', 'MultiArrayDimension']:
        file_p = os.path.join(test_d, 'std_msgs', 'msg', '%s.msg'%s)
        assert file_p == msg_context.get_file('std_msgs/%s'%s)
def test_load_msg_depends_stamped():
    """Dependency resolution for Stamped types and deeper hierarchies
    (Header plus nested geometry/sensor messages)."""
    #TODO: should there just be a 'load_msg, implicit=True?'
    from genmsg.msg_loader import MsgContext, load_msg_by_type, load_msg_depends
    test_d = get_test_dir()
    geometry_d = os.path.join(test_d, 'geometry_msgs', 'msg')
    search_path = {
        'test_ros': [ os.path.join(test_d, 'test_ros', 'msg') ],
        'std_msgs': [ os.path.join(test_d, 'std_msgs', 'msg') ],
        'geometry_msgs': [ geometry_d ],
        'sensor_msgs': [ os.path.join(test_d, 'sensor_msgs', 'msg') ],
    }
    # Test with Stamped and deeper hierarchies, Header
    msg_context = MsgContext.create_default()
    root_spec = load_msg_by_type(msg_context, 'geometry_msgs/PoseStamped', search_path)
    load_msg_depends(msg_context, root_spec, search_path)
    file_p = os.path.join(test_d, 'geometry_msgs', 'msg', 'PoseStamped.msg')
    assert file_p == msg_context.get_file('geometry_msgs/PoseStamped')
    # all transitive dependencies vs. only the direct ones
    val = msg_context.get_all_depends('geometry_msgs/PoseStamped')
    assert set(['std_msgs/Header', 'geometry_msgs/Pose', 'geometry_msgs/Point', 'geometry_msgs/Quaternion']) == set(val), val
    val = msg_context.get_depends('geometry_msgs/PoseStamped')
    assert set(['std_msgs/Header', 'geometry_msgs/Pose']) == set(val), val
    for s in ['Header']:
        file_p = os.path.join(test_d, 'std_msgs', 'msg', '%s.msg'%s)
        assert file_p == msg_context.get_file('std_msgs/%s'%s)
    for s in ['Pose', 'Point', 'Quaternion']:
        file_p = os.path.join(geometry_d, '%s.msg'%s)
        assert file_p == msg_context.get_file('geometry_msgs/%s'%s)
    # three-level hierarchy: TwistWithCovarianceStamped -> TwistWithCovariance -> ...
    msg_context = MsgContext.create_default()
    root_spec = load_msg_by_type(msg_context, 'geometry_msgs/TwistWithCovarianceStamped', search_path)
    load_msg_depends(msg_context, root_spec, search_path)
    file_p = os.path.join(test_d, 'geometry_msgs', 'msg', 'TwistWithCovarianceStamped.msg')
    assert file_p == msg_context.get_file('geometry_msgs/TwistWithCovarianceStamped')
    val = msg_context.get_all_depends('geometry_msgs/TwistWithCovarianceStamped')
    assert set(['std_msgs/Header', 'geometry_msgs/TwistWithCovariance', 'geometry_msgs/Twist', 'geometry_msgs/Vector3']) == set(val), val
    val = msg_context.get_depends('geometry_msgs/TwistWithCovarianceStamped')
    assert set(['std_msgs/Header', 'geometry_msgs/TwistWithCovariance']) == set(val), val
    for s in ['Header']:
        file_p = os.path.join(test_d, 'std_msgs', 'msg', '%s.msg'%s)
        assert file_p == msg_context.get_file('std_msgs/%s'%s)
    for s in ['TwistWithCovariance', 'Twist', 'Vector3']:
        file_p = os.path.join(geometry_d, '%s.msg'%s)
        assert file_p == msg_context.get_file('geometry_msgs/%s'%s)
    # cross-package dependencies: sensor_msgs/Imu uses geometry_msgs types
    msg_context = MsgContext.create_default()
    root_spec = load_msg_by_type(msg_context, 'sensor_msgs/Imu', search_path)
    load_msg_depends(msg_context, root_spec, search_path)
    file_p = os.path.join(test_d, 'sensor_msgs', 'msg', 'Imu.msg')
    assert file_p == msg_context.get_file('sensor_msgs/Imu')
    val = msg_context.get_all_depends('sensor_msgs/Imu')
    assert set(['std_msgs/Header', 'geometry_msgs/Quaternion', 'geometry_msgs/Vector3']) == set(val), val
    val = msg_context.get_depends('sensor_msgs/Imu')
    assert set(['std_msgs/Header', 'geometry_msgs/Quaternion', 'geometry_msgs/Vector3']) == set(val), val
    for s in ['Header']:
        file_p = os.path.join(test_d, 'std_msgs', 'msg', '%s.msg'%s)
        assert file_p == msg_context.get_file('std_msgs/%s'%s)
    for s in ['Quaternion', 'Vector3']:
        file_p = os.path.join(geometry_d, '%s.msg'%s)
        assert file_p == msg_context.get_file('geometry_msgs/%s'%s)
def test_load_depends_msg():
    """load_depends with message specs: error cases plus direct and
    transitive dependency resolution."""
    from genmsg.msg_loader import MsgContext, load_msg_by_type, load_depends, MsgNotFound, load_srv_by_type
    test_d = get_test_dir()
    geometry_d = os.path.join(test_d, 'geometry_msgs', 'msg')
    msg_search_path = {
        'test_ros': [ os.path.join(test_d, 'test_ros', 'msg') ],
        'std_msgs': [ os.path.join(test_d, 'std_msgs', 'msg') ],
        'geometry_msgs': [ geometry_d ],
        'sensor_msgs': [ os.path.join(test_d, 'sensor_msgs', 'msg') ],
        'invalid': [ os.path.join(test_d, 'invalid', 'msg') ],
    }
    # Test not found: specs with unresolvable dependencies must raise
    msg_context = MsgContext.create_default()
    root_spec = load_msg_by_type(msg_context, 'invalid/BadDepend', msg_search_path)
    try:
        load_depends(msg_context, root_spec, msg_search_path)
        assert False, "should have raised MsgNotFound"
    except MsgNotFound:
        pass
    root_spec = load_msg_by_type(msg_context, 'invalid/BadLocalDepend', msg_search_path)
    try:
        load_depends(msg_context, root_spec, msg_search_path)
        assert False, "should have raised MsgNotFound"
    except MsgNotFound:
        pass
    # Test with msgs: PoseStamped pulls in Header + nested geometry types
    msg_context = MsgContext.create_default()
    root_spec = load_msg_by_type(msg_context, 'geometry_msgs/PoseStamped', msg_search_path)
    load_depends(msg_context, root_spec, msg_search_path)
    file_p = os.path.join(test_d, 'geometry_msgs', 'msg', 'PoseStamped.msg')
    assert file_p == msg_context.get_file('geometry_msgs/PoseStamped')
    val = msg_context.get_all_depends('geometry_msgs/PoseStamped')
    assert set(['std_msgs/Header', 'geometry_msgs/Pose', 'geometry_msgs/Point', 'geometry_msgs/Quaternion']) == set(val), val
    val = msg_context.get_depends('geometry_msgs/PoseStamped')
    assert set(['std_msgs/Header', 'geometry_msgs/Pose']) == set(val), val
    for s in ['Header']:
        file_p = os.path.join(test_d, 'std_msgs', 'msg', '%s.msg'%s)
        assert file_p == msg_context.get_file('std_msgs/%s'%s)
    for s in ['Pose', 'Point', 'Quaternion']:
        file_p = os.path.join(geometry_d, '%s.msg'%s)
        assert file_p == msg_context.get_file('geometry_msgs/%s'%s)
    # cross-package case: sensor_msgs/Imu depends on geometry_msgs types
    msg_context = MsgContext.create_default()
    root_spec = load_msg_by_type(msg_context, 'sensor_msgs/Imu', msg_search_path)
    load_depends(msg_context, root_spec, msg_search_path)
    file_p = os.path.join(test_d, 'sensor_msgs', 'msg', 'Imu.msg')
    assert file_p == msg_context.get_file('sensor_msgs/Imu')
    val = msg_context.get_depends('sensor_msgs/Imu')
    assert set(['std_msgs/Header', 'geometry_msgs/Quaternion', 'geometry_msgs/Vector3']) == set(val), val
    for s in ['Header']:
        file_p = os.path.join(test_d, 'std_msgs', 'msg', '%s.msg'%s)
        assert file_p == msg_context.get_file('std_msgs/%s'%s)
    for s in ['Quaternion', 'Vector3']:
        file_p = os.path.join(geometry_d, '%s.msg'%s)
        assert file_p == msg_context.get_file('geometry_msgs/%s'%s)
def test_load_depends_srv():
    """load_depends with service specs: request/response dependencies are
    tracked separately, and non-spec inputs raise ValueError."""
    from genmsg.msg_loader import MsgContext, load_msg_by_type, load_depends, MsgNotFound, load_srv_by_type
    test_d = get_test_dir()
    geometry_d = os.path.join(test_d, 'geometry_msgs', 'msg')
    msg_search_path = {
        'test_ros': [ os.path.join(test_d, 'test_ros', 'msg') ],
        'std_msgs': [ os.path.join(test_d, 'std_msgs', 'msg') ],
        'geometry_msgs': [ geometry_d ],
        'sensor_msgs': [ os.path.join(test_d, 'sensor_msgs', 'msg') ],
        'invalid': [ os.path.join(test_d, 'invalid', 'msg') ],
    }
    # Test with srvs
    srv_search_path = {
        'test_ros': [ os.path.join(test_d, 'test_ros', 'srv') ],
        'std_srvs': [ os.path.join(test_d, 'std_srvs', 'srv') ],
    }
    # a srv with only builtin fields has no message dependencies
    msg_context = MsgContext.create_default()
    root_spec = load_srv_by_type(msg_context, 'test_ros/AddTwoInts', srv_search_path)
    load_depends(msg_context, root_spec, msg_search_path)
    val = msg_context.get_depends('test_ros/AddTwoIntsRequest')
    assert val == [], val
    val = msg_context.get_depends('test_ros/AddTwoIntsResponse')
    assert val == [], val
    # test with srv that has depends: response pulls in geometry types
    msg_context = MsgContext.create_default()
    response_deps = ['std_msgs/Header', 'geometry_msgs/Pose', 'geometry_msgs/PoseStamped', 'geometry_msgs/Point', 'geometry_msgs/Quaternion']
    root_spec = load_srv_by_type(msg_context, 'test_ros/GetPoseStamped', srv_search_path)
    load_depends(msg_context, root_spec, msg_search_path)
    for d in response_deps:
        assert msg_context.is_registered(d)
    val = msg_context.get_depends('test_ros/GetPoseStampedRequest')
    assert val == [], val
    val = msg_context.get_depends('test_ros/GetPoseStampedResponse')
    assert val == ['geometry_msgs/PoseStamped']
    # Test with nonsense: load_depends rejects non-spec objects
    class Foo(object): pass
    try:
        load_depends(msg_context, Foo(), msg_search_path)
        assert False, "should have raised"
    except ValueError:
        pass
def test_load_srv_by_type():
    """Exercise load_srv_by_type(): registration of request/response halves,
    spec naming, field parsing, and error paths (bad search path, missing type)."""
    from genmsg.msg_loader import load_srv_by_type, MsgContext, MsgNotFound
    test_d = get_test_dir()
    test_ros_dir = os.path.join(test_d, 'test_ros', 'srv')
    std_srvs_dir = os.path.join(test_d, 'std_srvs', 'srv')
    empty_path = os.path.join(std_srvs_dir, 'Empty.srv')
    a2i_path = os.path.join(std_srvs_dir, 'AddTwoInts.srv')
    search_path = {
        'test_ros': [ test_ros_dir ],
        'std_srvs': [ std_srvs_dir ],
    }
    msg_context = MsgContext.create_default()
    spec = load_srv_by_type(msg_context, 'std_srvs/Empty', search_path)
    # Loading a service registers both generated message halves, and both
    # trace back to the same .srv source file.
    assert msg_context.is_registered('std_srvs/EmptyRequest')
    assert msg_context.is_registered('std_srvs/EmptyResponse')
    assert msg_context.get_registered('std_srvs/EmptyRequest') == spec.request
    assert msg_context.get_registered('std_srvs/EmptyResponse') == spec.response
    assert msg_context.get_file('std_srvs/EmptyRequest') == empty_path, msg_context.get_file('std_srvs/EmptyRequest')
    assert msg_context.get_file('std_srvs/EmptyResponse') == empty_path,msg_context.get_file('std_srvs/EmptyResponse')
    assert spec.request.full_name == 'std_srvs/EmptyRequest'
    assert spec.response.full_name == 'std_srvs/EmptyResponse'
    assert spec.request.short_name == 'EmptyRequest'
    assert spec.response.short_name == 'EmptyResponse'
    assert spec.request.package == 'std_srvs'
    assert spec.response.package == 'std_srvs'
    # Empty.srv declares no fields on either side.
    for f in [spec.request.names, spec.request.types, spec.response.names, spec.response.types]:
        assert [] == f
    spec = load_srv_by_type(msg_context, 'test_ros/AddTwoInts', search_path)
    assert msg_context.is_registered('test_ros/AddTwoIntsRequest')
    assert msg_context.is_registered('test_ros/AddTwoIntsResponse')
    assert msg_context.get_registered('test_ros/AddTwoIntsRequest') == spec.request
    assert msg_context.get_registered('test_ros/AddTwoIntsResponse') == spec.response
    assert spec.request.types == ['int64', 'int64'], spec.request.types
    assert spec.request.names == ['a', 'b'], spec.request.names
    assert spec.response.types == ['int64'], spec.response.types
    assert spec.response.names == ['sum'], spec.response.names
    # test invalid search path (must be a dict, not a list)
    try:
        load_srv_by_type(msg_context, 'test_ros/AddTwoInts', [std_srvs_dir])
        assert False, "should have raised"
    except ValueError:
        pass
    # test not found
    try:
        load_srv_by_type(msg_context, 'test_ros/Fake', search_path)
        assert False, "should have raised"
    except MsgNotFound:
        pass
| gpl-3.0 |
eric-stanley/youtube-dl | youtube_dl/extractor/odnoklassniki.py | 30 | 3626 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
from ..utils import (
unified_strdate,
int_or_none,
qualities,
unescapeHTML,
)
class OdnoklassnikiIE(InfoExtractor):
    """Extractor for Odnoklassniki (ok.ru) videos.

    Metadata comes either inline (``flashvars.metadata`` JSON embedded in
    the page) or from a secondary URL (``flashvars.metadataUrl``); the two
    ``_TESTS`` entries below cover one case each.
    """
    _VALID_URL = r'https?://(?:odnoklassniki|ok)\.ru/(?:video|web-api/video/moviePlayer)/(?P<id>[\d-]+)'
    _TESTS = [{
        # metadata in JSON
        'url': 'http://ok.ru/video/20079905452',
        'md5': '6ba728d85d60aa2e6dd37c9e70fdc6bc',
        'info_dict': {
            'id': '20079905452',
            'ext': 'mp4',
            'title': 'Культура меняет нас (прекрасный ролик!))',
            'duration': 100,
            'upload_date': '20141207',
            'uploader_id': '330537914540',
            'uploader': 'Виталий Добровольский',
            'like_count': int,
            'age_limit': 0,
        },
    }, {
        # metadataUrl
        'url': 'http://ok.ru/video/63567059965189-0',
        'md5': '9676cf86eff5391d35dea675d224e131',
        'info_dict': {
            'id': '63567059965189-0',
            'ext': 'mp4',
            'title': 'Девушка без комплексов ...',
            'duration': 191,
            'upload_date': '20150518',
            'uploader_id': '534380003155',
            'uploader': '☭ Андрей Мещанинов ☭',
            'like_count': int,
            'age_limit': 0,
        },
    }, {
        'url': 'http://ok.ru/web-api/video/moviePlayer/20079905452',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(
            'http://ok.ru/video/%s' % video_id, video_id)

        # Player configuration is stored HTML-escaped in a data attribute.
        player = self._parse_json(
            unescapeHTML(self._search_regex(
                r'data-attributes="([^"]+)"', webpage, 'player')),
            video_id)

        flashvars = player['flashvars']

        # Metadata is either embedded directly or fetched from metadataUrl.
        metadata = flashvars.get('metadata')
        if metadata:
            metadata = self._parse_json(metadata, video_id)
        else:
            metadata = self._download_json(
                compat_urllib_parse_unquote(flashvars['metadataUrl']),
                video_id, 'Downloading metadata JSON')

        movie = metadata['movie']
        title = movie['title']
        thumbnail = movie.get('poster')
        duration = int_or_none(movie.get('duration'))

        author = metadata.get('author', {})
        uploader_id = author.get('id')
        uploader = author.get('name')

        # Upload date and age limit come from Yandex OVS meta tags on the page.
        upload_date = unified_strdate(self._html_search_meta(
            'ya:ovs:upload_date', webpage, 'upload date', default=None))

        age_limit = None
        adult = self._html_search_meta(
            'ya:ovs:adult', webpage, 'age limit', default=None)
        if adult:
            age_limit = 18 if adult == 'true' else 0

        like_count = int_or_none(metadata.get('likeCount'))

        # Rank formats by the site's named quality ladder.
        quality = qualities(('mobile', 'lowest', 'low', 'sd', 'hd'))

        formats = [{
            'url': f['url'],
            'ext': 'mp4',
            'format_id': f['name'],
            'quality': quality(f['name']),
        } for f in metadata['videos']]

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'duration': duration,
            'upload_date': upload_date,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'like_count': like_count,
            'age_limit': age_limit,
            'formats': formats,
        }
| unlicense |
shireenrao/pelican-plugins | github_activity/github_activity.py | 75 | 1925 | # -*- coding: utf-8 -*-
# NEEDS WORK
"""
Copyright (c) Marco Milanesi <kpanic@gnufunk.org>
Github Activity
---------------
A plugin to list your Github Activity
"""
from __future__ import unicode_literals, print_function
import logging
logger = logging.getLogger(__name__)
from pelican import signals
class GitHubActivity:
    """
    Downloads a GitHub activity feed with feedparser and formats its
    entries for display in a template.
    """
    def __init__(self, generator):
        # feedparser is imported lazily so the module can load without it;
        # register() reports the missing dependency.
        import feedparser
        self.activities = feedparser.parse(
            generator.settings['GITHUB_ACTIVITY_FEED'])
        self.max_entries = generator.settings['GITHUB_ACTIVITY_MAX_ENTRIES']

    def fetch(self):
        """
        Return up to ``max_entries`` pairs of [title, html_content],
        one per feed entry, in feed order.
        """
        formatted = [
            [entry['title'], entry['content'][0]['value']]
            for entry in self.activities['entries']
        ]
        return formatted[:self.max_entries]
def fetch_github_activity(gen, metadata):
    """
    registered handler for the github activity plugin

    it puts in generator.context the html needed to be displayed on a
    template

    :param gen: article generator; carries ``plugin_instance`` (set by
        ``feed_parser_initialization``) when the feed URL is configured.
    :param metadata: unused, present only to match the signal signature.
    """
    # Membership test on the mapping itself; ``.keys()`` was redundant.
    if 'GITHUB_ACTIVITY_FEED' in gen.settings:
        gen.context['github_activity'] = gen.plugin_instance.fetch()
def feed_parser_initialization(generator):
    """
    Initialization of feed parser

    Instantiates GitHubActivity (which fetches the configured feed) and
    caches it on the generator for fetch_github_activity to use later.
    """
    generator.plugin_instance = GitHubActivity(generator)
def register():
    """
    Plugin registration

    Connects the plugin handlers to Pelican's article-generator signals.

    Fix: the original wrapped the ``connect()`` calls in
    ``except ImportError``, but ``connect()`` never imports anything, so
    that handler was dead code -- ``feedparser`` is only imported lazily
    inside ``GitHubActivity.__init__``. Probe for the dependency here
    instead, and skip registration (with the original warning) if absent.
    """
    try:
        import feedparser  # noqa -- availability probe only
    except ImportError:
        logger.warning('`github_activity` failed to load dependency `feedparser`.'
                       '`github_activity` plugin not loaded.')
        return
    signals.article_generator_init.connect(feed_parser_initialization)
    signals.article_generator_context.connect(fetch_github_activity)
| agpl-3.0 |
SGCreations/Flask | Work/TriviaMVA/TriviaMVA/env/Lib/site-packages/pip/_vendor/requests/packages/urllib3/connection.py | 309 | 6533 | # urllib3/connection.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import sys
import socket
from socket import timeout as SocketTimeout
try: # Python 3
from http.client import HTTPConnection as _HTTPConnection, HTTPException
except ImportError:
from httplib import HTTPConnection as _HTTPConnection, HTTPException
class DummyConnection(object):
    """Used to detect a failed ConnectionCls import.

    Bound to ``HTTPSConnection`` below when SSL support cannot be imported,
    so callers can recognize that HTTPS is unavailable.
    """
    pass
try:  # Compiled with SSL?
    # Pessimistic defaults, overwritten below if the imports succeed:
    # no ssl module, HTTPS marked unavailable, and a placeholder error type.
    ssl = None
    HTTPSConnection = DummyConnection

    class BaseSSLError(BaseException):
        pass

    try:  # Python 3
        from http.client import HTTPSConnection as _HTTPSConnection
    except ImportError:
        from httplib import HTTPSConnection as _HTTPSConnection

    # Rebinds ``ssl`` from None to the real module on success.
    import ssl
    BaseSSLError = ssl.SSLError

except (ImportError, AttributeError):  # Platform-specific: No SSL.
    pass
from .exceptions import (
ConnectTimeoutError,
)
from .packages.ssl_match_hostname import match_hostname
from .packages import six
from .util import (
assert_fingerprint,
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
)
# Default TCP port for each supported URL scheme.
port_by_scheme = {
    'http': 80,
    'https': 443,
}
class HTTPConnection(_HTTPConnection, object):
    """
    Based on httplib.HTTPConnection but provides an extra constructor
    backwards-compatibility layer between older and newer Pythons.
    """

    default_port = port_by_scheme['http']

    # By default, disable Nagle's Algorithm.
    tcp_nodelay = 1

    def __init__(self, *args, **kw):
        # Strip constructor kwargs the underlying httplib of this Python
        # version does not accept.
        if six.PY3:  # Python 3
            kw.pop('strict', None)
        if sys.version_info < (2, 7):  # Python 2.6 and older
            kw.pop('source_address', None)

        # Pre-set source_address in case we have an older Python like 2.6.
        self.source_address = kw.get('source_address')

        # Superclass also sets self.source_address in Python 2.7+.
        _HTTPConnection.__init__(self, *args, **kw)

    def _new_conn(self):
        """ Establish a socket connection and set nodelay settings on it.

        :return: a new socket connection
        """
        extra_args = []
        if self.source_address:  # Python 2.7+
            extra_args.append(self.source_address)

        conn = socket.create_connection(
            (self.host, self.port), self.timeout, *extra_args)
        # Apply the class-level Nagle setting to the fresh socket.
        conn.setsockopt(
            socket.IPPROTO_TCP, socket.TCP_NODELAY, self.tcp_nodelay)

        return conn

    def _prepare_conn(self, conn):
        """Adopt ``conn`` as this connection's socket, tunneling if needed."""
        self.sock = conn
        # the _tunnel_host attribute was added in python 2.6.3 (via
        # http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
        # not have them.
        if getattr(self, '_tunnel_host', None):
            # TODO: Fix tunnel so it doesn't depend on self.sock state.
            self._tunnel()

    def connect(self):
        """Open a plain (non-TLS) connection to ``self.host:self.port``."""
        conn = self._new_conn()
        self._prepare_conn(conn)
class HTTPSConnection(HTTPConnection):
    """HTTPS connection that wraps the socket in TLS.

    Note: ``connect()`` calls ``ssl.wrap_socket`` with only the key/cert
    files -- no CA bundle or ``cert_reqs`` -- so the peer certificate is
    NOT verified here. See ``VerifiedHTTPSConnection`` for verification.
    """
    default_port = port_by_scheme['https']

    def __init__(self, host, port=None, key_file=None, cert_file=None,
                 strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, **kw):

        HTTPConnection.__init__(self, host, port, strict=strict,
                                timeout=timeout, **kw)

        self.key_file = key_file
        self.cert_file = cert_file

        # Required property for Google AppEngine 1.9.0 which otherwise causes
        # HTTPS requests to go out as HTTP. (See Issue #356)
        self._protocol = 'https'

    def connect(self):
        conn = self._new_conn()
        self._prepare_conn(conn)
        self.sock = ssl.wrap_socket(conn, self.key_file, self.cert_file)
class VerifiedHTTPSConnection(HTTPSConnection):
    """
    Based on httplib.HTTPSConnection but wraps the socket with
    SSL certification.
    """
    cert_reqs = None
    ca_certs = None
    ssl_version = None
    # NOTE(review): class-level mutable default -- instances that never
    # assign their own ``conn_kw`` all share this one dict.
    conn_kw = {}

    def set_cert(self, key_file=None, cert_file=None,
                 cert_reqs=None, ca_certs=None,
                 assert_hostname=None, assert_fingerprint=None):
        """Record key/cert files and verification policy for connect()."""
        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.ca_certs = ca_certs
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint

    def connect(self):
        """Connect, optionally tunnel, then TLS-wrap and verify the peer."""
        # Add certificate verification
        try:
            sock = socket.create_connection(
                address=(self.host, self.port), timeout=self.timeout,
                **self.conn_kw)
        except SocketTimeout:
            raise ConnectTimeoutError(
                self, "Connection to %s timed out. (connect timeout=%s)" %
                (self.host, self.timeout))

        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY,
                        self.tcp_nodelay)

        resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs)
        resolved_ssl_version = resolve_ssl_version(self.ssl_version)

        hostname = self.host
        if getattr(self, '_tunnel_host', None):
            # _tunnel_host was added in Python 2.6.3
            # (See: http://hg.python.org/cpython/rev/0f57b30a152f)
            self.sock = sock
            # Calls self._set_hostport(), so self.host is
            # self._tunnel_host below.
            self._tunnel()
            # Override the host with the one we're requesting data from.
            hostname = self._tunnel_host

        # Wrap socket using verification with the root certs in
        # trusted_root_certs
        self.sock = ssl_wrap_socket(sock, self.key_file, self.cert_file,
                                    cert_reqs=resolved_cert_reqs,
                                    ca_certs=self.ca_certs,
                                    server_hostname=hostname,
                                    ssl_version=resolved_ssl_version)

        if resolved_cert_reqs != ssl.CERT_NONE:
            # Fingerprint pinning takes precedence over hostname matching;
            # hostname matching can be disabled by assert_hostname=False.
            if self.assert_fingerprint:
                assert_fingerprint(self.sock.getpeercert(binary_form=True),
                                   self.assert_fingerprint)
            elif self.assert_hostname is not False:
                match_hostname(self.sock.getpeercert(),
                               self.assert_hostname or hostname)
# With SSL available, export the verifying class under the public name and
# keep the non-verifying one reachable for tests.
if ssl:
    # Make a copy for testing.
    UnverifiedHTTPSConnection = HTTPSConnection
    HTTPSConnection = VerifiedHTTPSConnection
| apache-2.0 |
Colonel-Top/Line-Bot-Python | python/oauth2client/crypt.py | 60 | 8447 | # -*- coding: utf-8 -*-
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Crypto-related routines for oauth2client."""
import json
import logging
import time
from oauth2client import _helpers
from oauth2client import _pure_python_crypt
RsaSigner = _pure_python_crypt.RsaSigner
RsaVerifier = _pure_python_crypt.RsaVerifier
CLOCK_SKEW_SECS = 300 # 5 minutes in seconds
AUTH_TOKEN_LIFETIME_SECS = 300 # 5 minutes in seconds
MAX_TOKEN_LIFETIME_SECS = 86400 # 1 day in seconds
logger = logging.getLogger(__name__)
class AppIdentityError(Exception):
    """Error to indicate crypto failure.

    Raised by the JWT helpers in this module for any verification problem:
    malformed token, bad signature, wrong audience, or a token used outside
    its valid time window.
    """
def _bad_pkcs12_key_as_pem(*args, **kwargs):
    """Fallback bound to ``pkcs12_key_as_pem`` when OpenSSL is unavailable."""
    raise NotImplementedError('pkcs12_key_as_pem requires OpenSSL.')
# Pick the best available crypto backend: OpenSSL, then PyCrypto, then the
# pure-Python RSA implementation imported above.
try:
    from oauth2client import _openssl_crypt
    OpenSSLSigner = _openssl_crypt.OpenSSLSigner
    OpenSSLVerifier = _openssl_crypt.OpenSSLVerifier
    pkcs12_key_as_pem = _openssl_crypt.pkcs12_key_as_pem
except ImportError:  # pragma: NO COVER
    OpenSSLVerifier = None
    OpenSSLSigner = None
    # PKCS#12 handling needs OpenSSL; without it, raise on use.
    pkcs12_key_as_pem = _bad_pkcs12_key_as_pem

try:
    from oauth2client import _pycrypto_crypt
    PyCryptoSigner = _pycrypto_crypt.PyCryptoSigner
    PyCryptoVerifier = _pycrypto_crypt.PyCryptoVerifier
except ImportError:  # pragma: NO COVER
    PyCryptoVerifier = None
    PyCryptoSigner = None

if OpenSSLSigner:
    Signer = OpenSSLSigner
    Verifier = OpenSSLVerifier
elif PyCryptoSigner:  # pragma: NO COVER
    Signer = PyCryptoSigner
    Verifier = PyCryptoVerifier
else:  # pragma: NO COVER
    Signer = RsaSigner
    Verifier = RsaVerifier
def make_signed_jwt(signer, payload, key_id=None):
    """Make a signed JWT.

    See http://self-issued.info/docs/draft-jones-json-web-token.html.

    Args:
        signer: crypt.Signer, Cryptographic signer.
        payload: dict, Dictionary of data to convert to JSON and then sign.
        key_id: string, (Optional) Key ID header.

    Returns:
        string, The JWT for the payload.
    """
    encode = _helpers._urlsafe_b64encode
    header = {'typ': 'JWT', 'alg': 'RS256'}
    if key_id is not None:
        header['kid'] = key_id

    # First two segments: base64url(JSON(header)) and base64url(JSON(payload)).
    parts = [encode(_helpers._json_encode(obj)) for obj in (header, payload)]
    # Third segment: signature over "header.payload".
    parts.append(encode(signer.sign(b'.'.join(parts))))

    logger.debug(str(parts))
    return b'.'.join(parts)
def _verify_signature(message, signature, certs):
    """Verifies signed content using a list of certificates.

    Args:
        message: string or bytes, The message to verify.
        signature: string or bytes, The signature on the message.
        certs: iterable, certificates in PEM format.

    Raises:
        AppIdentityError: If none of the certificates can verify the message
                          against the signature.
    """
    # Accept the token as soon as any certificate validates the signature;
    # any() short-circuits exactly like the original early return.
    verified = any(
        Verifier.from_string(pem, is_x509_cert=True).verify(message, signature)
        for pem in certs
    )
    if not verified:
        raise AppIdentityError('Invalid token signature')
def _check_audience(payload_dict, audience):
"""Checks audience field from a JWT payload.
Does nothing if the passed in ``audience`` is null.
Args:
payload_dict: dict, A dictionary containing a JWT payload.
audience: string or NoneType, an audience to check for in
the JWT payload.
Raises:
AppIdentityError: If there is no ``'aud'`` field in the payload
dictionary but there is an ``audience`` to check.
AppIdentityError: If the ``'aud'`` field in the payload dictionary
does not match the ``audience``.
"""
if audience is None:
return
audience_in_payload = payload_dict.get('aud')
if audience_in_payload is None:
raise AppIdentityError(
'No aud field in token: {0}'.format(payload_dict))
if audience_in_payload != audience:
raise AppIdentityError('Wrong recipient, {0} != {1}: {2}'.format(
audience_in_payload, audience, payload_dict))
def _verify_time_range(payload_dict):
    """Verifies the issued at and expiration from a JWT payload.

    The current UTC time must fall between ``iat`` and ``exp``, allowing
    up to ``CLOCK_SKEW_SECS`` of slack on either side, and the implied
    lifetime must stay under ``MAX_TOKEN_LIFETIME_SECS``.

    Args:
        payload_dict: dict, A dictionary containing a JWT payload.

    Raises:
        AppIdentityError: If ``iat`` or ``exp`` is missing, if the token
            lifetime is too long, or if the token is used before ``iat``
            or after ``exp`` (accounting for clock skew).
    """
    current = int(time.time())

    issued_at = payload_dict.get('iat')
    if issued_at is None:
        raise AppIdentityError(
            'No iat field in token: {0}'.format(payload_dict))

    expiration = payload_dict.get('exp')
    if expiration is None:
        raise AppIdentityError(
            'No exp field in token: {0}'.format(payload_dict))

    if expiration >= current + MAX_TOKEN_LIFETIME_SECS:
        raise AppIdentityError(
            'exp field too far in future: {0}'.format(payload_dict))

    earliest_allowed = issued_at - CLOCK_SKEW_SECS
    if current < earliest_allowed:
        raise AppIdentityError('Token used too early, {0} < {1}: {2}'.format(
            current, earliest_allowed, payload_dict))

    latest_allowed = expiration + CLOCK_SKEW_SECS
    if current > latest_allowed:
        raise AppIdentityError('Token used too late, {0} > {1}: {2}'.format(
            current, latest_allowed, payload_dict))
def verify_signed_jwt_with_certs(jwt, certs, audience=None):
    """Verify a JWT against public certs.

    See http://self-issued.info/docs/draft-jones-json-web-token.html.

    Args:
        jwt: string, A JWT.
        certs: dict, Dictionary whose values are public keys in PEM format.
        audience: string, The audience, 'aud', that this JWT should contain. If
                  None then the JWT's 'aud' parameter is not verified.

    Returns:
        dict, The deserialized JSON payload in the JWT.

    Raises:
        AppIdentityError: if any checks are failed.
    """
    jwt = _helpers._to_bytes(jwt)

    if jwt.count(b'.') != 2:
        raise AppIdentityError(
            'Wrong number of segments in token: {0}'.format(jwt))

    header, payload, signature = jwt.split(b'.')
    message_to_sign = header + b'.' + payload
    signature = _helpers._urlsafe_b64decode(signature)

    # Parse token.
    payload_bytes = _helpers._urlsafe_b64decode(payload)
    try:
        payload_dict = json.loads(_helpers._from_bytes(payload_bytes))
    except ValueError:
        # Narrowed from a bare ``except:``: json.loads reports malformed JSON
        # with ValueError (JSONDecodeError is a subclass), and the bare form
        # also swallowed KeyboardInterrupt/SystemExit.
        raise AppIdentityError('Can\'t parse token: {0}'.format(payload_bytes))

    # Verify that the signature matches the message.
    _verify_signature(message_to_sign, signature, certs.values())

    # Verify the issued at and created times in the payload.
    _verify_time_range(payload_dict)

    # Check audience.
    _check_audience(payload_dict, audience)

    return payload_dict
| mit |
klenks/jobsportal | venv/lib/python2.7/site-packages/django/db/backends/oracle/utils.py | 539 | 1252 | import datetime
from django.utils.encoding import force_bytes, force_text
from .base import Database
# Check whether cx_Oracle was compiled with the WITH_UNICODE option if
# cx_Oracle is pre-5.1. This will also be True for cx_Oracle 5.1 and in
# Python 3.0. See #19606. When True, strings are passed through as text;
# otherwise they must be encoded to bytes for the driver.
if int(Database.version.split('.', 1)[0]) >= 5 and \
        (int(Database.version.split('.', 2)[1]) >= 1 or
         not hasattr(Database, 'UNICODE')):
    convert_unicode = force_text
else:
    convert_unicode = force_bytes
class InsertIdVar(object):
    """
    A late-binding cursor variable that can be passed to Cursor.execute
    as a parameter, in order to receive the id of the row created by an
    insert statement.
    """

    def bind_parameter(self, cursor):
        # Allocate an Oracle NUMBER out-parameter and stash it on the cursor
        # so the generated id can be read back after execute().
        param = cursor.cursor.var(Database.NUMBER)
        cursor._insert_id_var = param
        return param
class Oracle_datetime(datetime.datetime):
    """
    A datetime object, with an additional class attribute
    to tell cx_Oracle to save the microseconds too.
    """
    input_size = Database.TIMESTAMP

    @classmethod
    def from_datetime(cls, dt):
        """Build an Oracle_datetime carrying *dt*'s naive date/time fields."""
        fields = (dt.year, dt.month, dt.day,
                  dt.hour, dt.minute, dt.second, dt.microsecond)
        return Oracle_datetime(*fields)
| mit |
nsdown/zhihu-py3 | zhihu/activity.py | 14 | 1222 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = '7sDream'
class Activity:
    """One entry of a user's activity feed; obtain via ``Author.activities``."""

    def __init__(self, act_type, act_time, **kwarg):
        """Create an activity record.

        :param ActType act_type: kind of activity
        :param datetime.datetime act_time: when the activity happened
        :return: activity object
        :rtype: Activity

        Exactly one keyword argument must be given; which attribute it
        becomes depends on ``Activity.type`` -- see :class:`.ActType`.
        """
        from .acttype import ActType
        if not isinstance(act_type, ActType):
            raise ValueError('invalid activity type')
        if len(kwarg) != 1:
            raise ValueError('except one kwarg (%d given)' % len(kwarg))

        self.type = act_type
        self.time = act_time
        # Unpack the single keyword argument directly.
        (attr_name, attr_value), = kwarg.items()
        self._attr = attr_name
        setattr(self, attr_name, attr_value)

    @property
    def content(self):
        """The payload object this activity carries (mapping in ActType).

        :rtype: Author or Question or Answer or Topic or Column or Post
        """
        return getattr(self, self._attr)
| mit |
IronLanguages/ironpython2 | Src/StdLib/repackage/setuptools/setuptools/command/setopt.py | 299 | 5085 | from distutils.util import convert_path
from distutils import log
from distutils.errors import DistutilsOptionError
import distutils
import os
from setuptools.extern.six.moves import configparser
from setuptools import Command
__all__ = ['config_file', 'edit_config', 'option_base', 'setopt']
def config_file(kind="local"):
    """Get the filename of the distutils, local, global, or per-user config

    `kind` must be one of "local", "global", or "user"

    :raises ValueError: for any other *kind*.
    """
    if kind == 'local':
        return 'setup.cfg'
    if kind == 'global':
        return os.path.join(
            os.path.dirname(distutils.__file__), 'distutils.cfg'
        )
    if kind == 'user':
        # The per-user file is a dotfile on POSIX platforms.
        # (Replaced the legacy ``cond and x or y`` trick with a ternary.)
        dot = '.' if os.name == 'posix' else ''
        return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot))
    raise ValueError(
        "config_file() type must be 'local', 'global', or 'user'", kind
    )
def edit_config(filename, settings, dry_run=False):
    """Edit a configuration file to include `settings`

    `settings` is a dictionary of dictionaries or ``None`` values, keyed by
    command/section name. A ``None`` value means to delete the entire section,
    while a dictionary lists settings to be changed or deleted in that section.
    A setting of ``None`` means to delete that setting.

    With ``dry_run`` true, all mutations are computed but the file is not
    rewritten.
    """
    log.debug("Reading configuration from %s", filename)
    opts = configparser.RawConfigParser()
    opts.read([filename])
    for section, options in settings.items():
        if options is None:
            log.info("Deleting section [%s] from %s", section, filename)
            opts.remove_section(section)
        else:
            # Sections are created on demand before setting options in them.
            if not opts.has_section(section):
                log.debug("Adding new section [%s] to %s", section, filename)
                opts.add_section(section)
            for option, value in options.items():
                if value is None:
                    log.debug(
                        "Deleting %s.%s from %s",
                        section, option, filename
                    )
                    opts.remove_option(section, option)
                    # Drop the section entirely once its last option is gone.
                    if not opts.options(section):
                        log.info("Deleting empty [%s] section from %s",
                                 section, filename)
                        opts.remove_section(section)
                else:
                    log.debug(
                        "Setting %s.%s to %r in %s",
                        section, option, value, filename
                    )
                    opts.set(section, option, value)

    log.info("Writing %s", filename)
    if not dry_run:
        with open(filename, 'w') as f:
            opts.write(f)
class option_base(Command):
    """Abstract base class for commands that mess with config files.

    Provides the three mutually-exclusive options selecting which config
    file to operate on, and resolves them to a single ``self.filename``.
    """

    user_options = [
        ('global-config', 'g',
         "save options to the site-wide distutils.cfg file"),
        ('user-config', 'u',
         "save options to the current user's pydistutils.cfg file"),
        ('filename=', 'f',
         "configuration file to use (default=setup.cfg)"),
    ]

    boolean_options = [
        'global-config', 'user-config',
    ]

    def initialize_options(self):
        self.filename = None
        self.global_config = None
        self.user_config = None

    def finalize_options(self):
        """Reduce the file options to exactly one target filename."""
        chosen = []
        if self.global_config:
            chosen.append(config_file('global'))
        if self.user_config:
            chosen.append(config_file('user'))
        if self.filename is not None:
            chosen.append(self.filename)
        if not chosen:
            chosen.append(config_file('local'))
        if len(chosen) > 1:
            raise DistutilsOptionError(
                "Must specify only one configuration file option",
                chosen
            )
        self.filename, = chosen
class setopt(option_base):
    """Save command-line options to a file"""

    description = "set an option in setup.cfg or another config file"

    user_options = [
        ('command=', 'c', 'command to set an option for'),
        ('option=', 'o', 'option to set'),
        ('set-value=', 's', 'value of the option'),
        ('remove', 'r', 'remove (unset) the value'),
    ] + option_base.user_options

    boolean_options = option_base.boolean_options + ['remove']

    def initialize_options(self):
        option_base.initialize_options(self)
        self.remove = None
        self.set_value = None
        self.option = None
        self.command = None

    def finalize_options(self):
        option_base.finalize_options(self)
        # Both the target (command + option) and an action (value or remove)
        # are mandatory.
        if self.command is None or self.option is None:
            raise DistutilsOptionError("Must specify --command *and* --option")
        if self.set_value is None and not self.remove:
            raise DistutilsOptionError("Must specify --set-value or --remove")

    def run(self):
        """Apply the single requested option change via edit_config()."""
        settings = {
            self.command: {self.option.replace('-', '_'): self.set_value}
        }
        edit_config(self.filename, settings, self.dry_run)
| apache-2.0 |
tcwicklund/django | django/contrib/gis/gdal/prototypes/ds.py | 349 | 4403 | """
This module houses the ctypes function prototypes for OGR DataSource
related data structures. OGR_Dr_*, OGR_DS_*, OGR_L_*, OGR_F_*,
OGR_Fld_* routines are relevant here.
"""
from ctypes import POINTER, c_char_p, c_double, c_int, c_long, c_void_p
from django.contrib.gis.gdal.envelope import OGREnvelope
from django.contrib.gis.gdal.libgdal import GDAL_VERSION, lgdal
from django.contrib.gis.gdal.prototypes.generation import (
const_string_output, double_output, geom_output, int64_output, int_output,
srs_output, void_output, voidptr_output,
)
c_int_p = POINTER(c_int)  # shortcut type for the int out-parameters below

# Each assignment wraps a raw OGR C entry point from libgdal in a typed
# ctypes prototype (argument list + return-value handling/errcheck).

# Driver Routines
register_all = void_output(lgdal.OGRRegisterAll, [], errcheck=False)
cleanup_all = void_output(lgdal.OGRCleanupAll, [], errcheck=False)
get_driver = voidptr_output(lgdal.OGRGetDriver, [c_int])
get_driver_by_name = voidptr_output(lgdal.OGRGetDriverByName, [c_char_p], errcheck=False)
get_driver_count = int_output(lgdal.OGRGetDriverCount, [])
get_driver_name = const_string_output(lgdal.OGR_Dr_GetName, [c_void_p], decoding='ascii')

# DataSource
open_ds = voidptr_output(lgdal.OGROpen, [c_char_p, c_int, POINTER(c_void_p)])
destroy_ds = void_output(lgdal.OGR_DS_Destroy, [c_void_p], errcheck=False)
release_ds = void_output(lgdal.OGRReleaseDataSource, [c_void_p])
get_ds_name = const_string_output(lgdal.OGR_DS_GetName, [c_void_p])
get_layer = voidptr_output(lgdal.OGR_DS_GetLayer, [c_void_p, c_int])
get_layer_by_name = voidptr_output(lgdal.OGR_DS_GetLayerByName, [c_void_p, c_char_p])
get_layer_count = int_output(lgdal.OGR_DS_GetLayerCount, [c_void_p])

# Layer Routines
get_extent = void_output(lgdal.OGR_L_GetExtent, [c_void_p, POINTER(OGREnvelope), c_int])
get_feature = voidptr_output(lgdal.OGR_L_GetFeature, [c_void_p, c_long])
get_feature_count = int_output(lgdal.OGR_L_GetFeatureCount, [c_void_p, c_int])
get_layer_defn = voidptr_output(lgdal.OGR_L_GetLayerDefn, [c_void_p])
get_layer_srs = srs_output(lgdal.OGR_L_GetSpatialRef, [c_void_p])
get_next_feature = voidptr_output(lgdal.OGR_L_GetNextFeature, [c_void_p])
reset_reading = void_output(lgdal.OGR_L_ResetReading, [c_void_p], errcheck=False)
test_capability = int_output(lgdal.OGR_L_TestCapability, [c_void_p, c_char_p])
get_spatial_filter = geom_output(lgdal.OGR_L_GetSpatialFilter, [c_void_p])
set_spatial_filter = void_output(lgdal.OGR_L_SetSpatialFilter, [c_void_p, c_void_p], errcheck=False)
set_spatial_filter_rect = void_output(lgdal.OGR_L_SetSpatialFilterRect,
    [c_void_p, c_double, c_double, c_double, c_double], errcheck=False
)

# Feature Definition Routines
get_fd_geom_type = int_output(lgdal.OGR_FD_GetGeomType, [c_void_p])
get_fd_name = const_string_output(lgdal.OGR_FD_GetName, [c_void_p])
# NOTE: intentional alias of OGR_FD_GetName kept for backwards compatibility.
get_feat_name = const_string_output(lgdal.OGR_FD_GetName, [c_void_p])
get_field_count = int_output(lgdal.OGR_FD_GetFieldCount, [c_void_p])
get_field_defn = voidptr_output(lgdal.OGR_FD_GetFieldDefn, [c_void_p, c_int])

# Feature Routines
clone_feature = voidptr_output(lgdal.OGR_F_Clone, [c_void_p])
destroy_feature = void_output(lgdal.OGR_F_Destroy, [c_void_p], errcheck=False)
feature_equal = int_output(lgdal.OGR_F_Equal, [c_void_p, c_void_p])
get_feat_geom_ref = geom_output(lgdal.OGR_F_GetGeometryRef, [c_void_p])
get_feat_field_count = int_output(lgdal.OGR_F_GetFieldCount, [c_void_p])
get_feat_field_defn = voidptr_output(lgdal.OGR_F_GetFieldDefnRef, [c_void_p, c_int])
get_fid = int_output(lgdal.OGR_F_GetFID, [c_void_p])
get_field_as_datetime = int_output(lgdal.OGR_F_GetFieldAsDateTime,
    [c_void_p, c_int, c_int_p, c_int_p, c_int_p, c_int_p, c_int_p, c_int_p]
)
get_field_as_double = double_output(lgdal.OGR_F_GetFieldAsDouble, [c_void_p, c_int])
get_field_as_integer = int_output(lgdal.OGR_F_GetFieldAsInteger, [c_void_p, c_int])
# 64-bit integer fields only exist from GDAL 2.0 on.
if GDAL_VERSION >= (2, 0):
    get_field_as_integer64 = int64_output(lgdal.OGR_F_GetFieldAsInteger64, [c_void_p, c_int])
get_field_as_string = const_string_output(lgdal.OGR_F_GetFieldAsString, [c_void_p, c_int])
get_field_index = int_output(lgdal.OGR_F_GetFieldIndex, [c_void_p, c_char_p])

# Field Routines
get_field_name = const_string_output(lgdal.OGR_Fld_GetNameRef, [c_void_p])
get_field_precision = int_output(lgdal.OGR_Fld_GetPrecision, [c_void_p])
get_field_type = int_output(lgdal.OGR_Fld_GetType, [c_void_p])
get_field_type_name = const_string_output(lgdal.OGR_GetFieldTypeName, [c_int])
get_field_width = int_output(lgdal.OGR_Fld_GetWidth, [c_void_p])
CryptoRepairCrew/p2pool | nattraverso/portmapper.py | 288 | 4157 | """
Generic NAT Port mapping interface.
TODO: Example
@author: Raphael Slinckx
@copyright: Copyright 2005
@license: LGPL
@contact: U{raphael@slinckx.net<mailto:raphael@slinckx.net>}
@version: 0.1.0
"""
__revision__ = "$id"
from twisted.internet.base import BasePort
# Public API
def get_port_mapper(proto="TCP"):
    """
    Return a deferred that fires with a L{NATMapper} able to map a port
    for the given protocol ('TCP' or 'UDP', defaulting to TCP).

    Only the UPnP mapper is currently implemented; it handles both UDP
    and TCP, so the *proto* argument does not change the result today.

    @param proto: The protocol: 'TCP' or 'UDP'
    @type proto: string
    @return: A deferred called with a L{NATMapper} instance
    @rtype: L{twisted.internet.defer.Deferred}
    """
    # Imported lazily so merely importing this module stays cheap.
    from nattraverso import pynupnp
    return pynupnp.get_port_mapper()
class NATMapper:
    """
    Define methods to map port objects (as returned by twisted's listenXX).
    This allows NAT to be traversed from incoming packets.

    Currently the only implementation of this class is the UPnP Mapper, which
    can map UDP and TCP ports, if an UPnP Device exists.
    """
    # Abstract base: every public method raises NotImplementedError and must
    # be overridden by a concrete mapper (e.g. the UPnP implementation).

    def __init__(self):
        # Instantiating the abstract base directly is a programming error.
        raise NotImplementedError("Cannot instantiate the class")

    def map(self, port):
        """
        Create a mapping for the given twisted's port object.

        The deferred will call back with a tuple (extaddr, extport):
            - extaddr: The ip string of the external ip address of this host
            - extport: the external port number used to map the given Port object

        When called multiple times with the same Port,
        callback with the existing mapping.

        @param port: The port object to map
        @type port: a L{twisted.internet.interfaces.IListeningPort} object
        @return: A deferred called with the above defined tuple
        @rtype: L{twisted.internet.defer.Deferred}
        """
        raise NotImplementedError

    def info(self, port):
        """
        Returns the existing mapping for the given port object. That means map()
        has to be called before.

        @param port: The port object to retreive info from
        @type port: a L{twisted.internet.interfaces.IListeningPort} object
        @raise ValueError: When there is no such existing mapping
        @return: a tuple (extaddress, extport).
        @see: L{map() function<map>}
        """
        raise NotImplementedError

    def unmap(self, port):
        """
        Remove an existing mapping for the given twisted's port object.

        @param port: The port object to unmap
        @type port: a L{twisted.internet.interfaces.IListeningPort} object
        @return: A deferred called with None
        @rtype: L{twisted.internet.defer.Deferred}
        @raise ValueError: When there is no such existing mapping
        """
        raise NotImplementedError

    def get_port_mappings(self):
        """
        Returns a deferred that will be called with a dictionnary of the
        existing mappings.

        The dictionnary structure is the following:
            - Keys: tuple (protocol, external_port)
                - protocol is "TCP" or "UDP".
                - external_port is the external port number, as see on the
                    WAN side.
            - Values:tuple (internal_ip, internal_port)
                - internal_ip is the LAN ip address of the host.
                - internal_port is the internal port number mapped
                    to external_port.

        @return: A deferred called with the above defined dictionnary
        @rtype: L{twisted.internet.defer.Deferred}
        """
        raise NotImplementedError

    def _check_valid_port(self, port):
        """Various Port object validity checks. Raise a ValueError."""
        # Concrete subclasses call this before mapping: the port must be a
        # connected twisted listening port with a real (non-zero) port number.
        if not isinstance(port, BasePort):
            raise ValueError("expected a Port, got %r"%(port))
        if not port.connected:
            raise ValueError("Port %r is not listening"%(port))
        loc_addr = port.getHost()
        if loc_addr.port == 0:
            raise ValueError("Port %r has port number of 0"%(port))
| gpl-3.0 |
translate/pootle | pootle/core/models/virtualresource.py | 5 | 1257 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from ..mixins import TreeItem
class VirtualResource(TreeItem):
    """A non-persistent tree node aggregating several actual resources.

    A virtual resource doesn't live in the DB; it only carries a unique
    ``pootle_path`` of its own plus the collection of real resources it
    groups.  For instance, projects can use this to hold cross-language
    references.

    Don't use this object as-is — subclass it and adapt the details to
    each concrete context.
    """

    def __init__(self, resources, pootle_path, *args, **kwargs):
        # Collection of the underlying (real) resources.
        self.resources = resources
        # Unique virtual path identifying this node.
        self.pootle_path = pootle_path
        # Optional context object; consumed here so TreeItem never sees it.
        self.context = kwargs.pop("context", None)
        super(VirtualResource, self).__init__(*args, **kwargs)

    def __unicode__(self):
        return self.pootle_path

    # # # TreeItem

    def get_cachekey(self):
        return self.pootle_path

    def get_children(self):
        return self.resources

    # # # /TreeItem
| gpl-3.0 |
Microsoft/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/tkinter/dnd.py | 15 | 11488 | """Drag-and-drop support for Tkinter.
This is very preliminary. I currently only support dnd *within* one
application, between different windows (or within the same window).
I am trying to make this as generic as possible -- not dependent on
the use of a particular widget or icon type, etc. I also hope that
this will work with Pmw.
To enable an object to be dragged, you must create an event binding
for it that starts the drag-and-drop process. Typically, you should
bind <ButtonPress> to a callback function that you write. The function
should call Tkdnd.dnd_start(source, event), where 'source' is the
object to be dragged, and 'event' is the event that invoked the call
(the argument to your callback function). Even though this is a class
instantiation, the returned instance should not be stored -- it will
be kept alive automatically for the duration of the drag-and-drop.
When a drag-and-drop is already in process for the Tk interpreter, the
call is *ignored*; this normally averts starting multiple simultaneous
dnd processes, e.g. because different button callbacks all
dnd_start().
The object is *not* necessarily a widget -- it can be any
application-specific object that is meaningful to potential
drag-and-drop targets.
Potential drag-and-drop targets are discovered as follows. Whenever
the mouse moves, and at the start and end of a drag-and-drop move, the
Tk widget directly under the mouse is inspected. This is the target
widget (not to be confused with the target object, yet to be
determined). If there is no target widget, there is no dnd target
object. If there is a target widget, and it has an attribute
dnd_accept, this should be a function (or any callable object). The
function is called as dnd_accept(source, event), where 'source' is the
object being dragged (the object passed to dnd_start() above), and
'event' is the most recent event object (generally a <Motion> event;
it can also be <ButtonPress> or <ButtonRelease>). If the dnd_accept()
function returns something other than None, this is the new dnd target
object. If dnd_accept() returns None, or if the target widget has no
dnd_accept attribute, the target widget's parent is considered as the
target widget, and the search for a target object is repeated from
there. If necessary, the search is repeated all the way up to the
root widget. If none of the target widgets can produce a target
object, there is no target object (the target object is None).
The target object thus produced, if any, is called the new target
object. It is compared with the old target object (or None, if there
was no old target widget). There are several cases ('source' is the
source object, and 'event' is the most recent event object):
- Both the old and new target objects are None. Nothing happens.
- The old and new target objects are the same object. Its method
dnd_motion(source, event) is called.
- The old target object was None, and the new target object is not
None. The new target object's method dnd_enter(source, event) is
called.
- The new target object is None, and the old target object is not
None. The old target object's method dnd_leave(source, event) is
called.
- The old and new target objects differ and neither is None. The old
target object's method dnd_leave(source, event), and then the new
target object's method dnd_enter(source, event) is called.
Once this is done, the new target object replaces the old one, and the
Tk mainloop proceeds. The return value of the methods mentioned above
is ignored; if they raise an exception, the normal exception handling
mechanisms take over.
The drag-and-drop processes can end in two ways: a final target object
is selected, or no final target object is selected. When a final
target object is selected, it will always have been notified of the
potential drop by a call to its dnd_enter() method, as described
above, and possibly one or more calls to its dnd_motion() method; its
dnd_leave() method has not been called since the last call to
dnd_enter(). The target is notified of the drop by a call to its
method dnd_commit(source, event).
If no final target object is selected, and there was an old target
object, its dnd_leave(source, event) method is called to complete the
dnd sequence.
Finally, the source object is notified that the drag-and-drop process
is over, by a call to source.dnd_end(target, event), specifying either
the selected target object, or None if no target object was selected.
The source object can use this to implement the commit action; this is
sometimes simpler than to do it in the target's dnd_commit(). The
target's dnd_commit() method could then simply be aliased to
dnd_leave().
At any time during a dnd sequence, the application can cancel the
sequence by calling the cancel() method on the object returned by
dnd_start(). This will call dnd_leave() if a target is currently
active; it will never call dnd_commit().
"""
import tkinter
# The factory function
def dnd_start(source, event):
    """Try to begin a drag-and-drop of *source* triggered by *event*.

    Return the live DndHandler on success, or None when the handler
    could not take ownership (a dnd is already in progress, or the
    event was unsuitable).
    """
    handler = DndHandler(source, event)
    return handler if handler.root else None
# The class that does the work
class DndHandler:
    """State machine driving one drag-and-drop sequence.

    One instance exists per active drag; it is stored on the Tk root
    (as a private attribute) so only one drag can run at a time.  It
    tracks the current target object and dispatches dnd_enter /
    dnd_motion / dnd_leave / dnd_commit callbacks as the mouse moves.
    """

    # No root means __init__ bailed out; dnd_start() checks this.
    root = None

    def __init__(self, source, event):
        # Only react to real mouse buttons (1-5).
        if event.num > 5:
            return
        root = event.widget._root()
        try:
            # Name-mangled to _DndHandler__dnd on the root widget.
            root.__dnd
            return # Don't start recursive dnd
        except AttributeError:
            root.__dnd = self
            self.root = root
        self.source = source
        self.target = None
        self.initial_button = button = event.num
        self.initial_widget = widget = event.widget
        # Release of the same button that started the drag ends it.
        self.release_pattern = "<B%d-ButtonRelease-%d>" % (button, button)
        self.save_cursor = widget['cursor'] or ""
        widget.bind(self.release_pattern, self.on_release)
        widget.bind("<Motion>", self.on_motion)
        widget['cursor'] = "hand2"

    def __del__(self):
        # Drop the back-reference from the root, tolerating a root that
        # was already cleaned up.
        root = self.root
        self.root = None
        if root:
            try:
                del root.__dnd
            except AttributeError:
                pass

    def on_motion(self, event):
        """Recompute the target under the pointer and fire enter/leave/motion."""
        x, y = event.x_root, event.y_root
        target_widget = self.initial_widget.winfo_containing(x, y)
        source = self.source
        new_target = None
        # Walk up the widget hierarchy until some widget's dnd_accept()
        # yields a target object (or we run out of parents).
        while target_widget:
            try:
                attr = target_widget.dnd_accept
            except AttributeError:
                pass
            else:
                new_target = attr(source, event)
                if new_target:
                    break
            target_widget = target_widget.master
        old_target = self.target
        if old_target is new_target:
            if old_target:
                old_target.dnd_motion(source, event)
        else:
            # Target changed: notify the old one before the new one.
            if old_target:
                self.target = None
                old_target.dnd_leave(source, event)
            if new_target:
                new_target.dnd_enter(source, event)
                self.target = new_target

    def on_release(self, event):
        """Button released: finish the drag with a commit."""
        self.finish(event, 1)

    def cancel(self, event=None):
        """Abort the drag; the current target only gets dnd_leave()."""
        self.finish(event, 0)

    def finish(self, event, commit=0):
        """Tear down bindings/state and notify target and source."""
        target = self.target
        source = self.source
        widget = self.initial_widget
        root = self.root
        try:
            del root.__dnd
            self.initial_widget.unbind(self.release_pattern)
            self.initial_widget.unbind("<Motion>")
            widget['cursor'] = self.save_cursor
            self.target = self.source = self.initial_widget = self.root = None
            if target:
                if commit:
                    target.dnd_commit(source, event)
                else:
                    target.dnd_leave(source, event)
        finally:
            # The source is always told the sequence ended, even if a
            # target callback raised.
            source.dnd_end(target, event)
# ----------------------------------------------------------------------
# The rest is here for testing and demonstration purposes only!
class Icon:
    """Demo draggable item: a Label placed on a Canvas via a window item."""

    def __init__(self, name):
        self.name = name
        # All None while the icon is not attached to any canvas.
        self.canvas = self.label = self.id = None

    def attach(self, canvas, x=10, y=10):
        """Place (or move) this icon on *canvas* at (x, y)."""
        if canvas is self.canvas:
            # Already on this canvas: just move the window item.
            self.canvas.coords(self.id, x, y)
            return
        if self.canvas:
            self.detach()
        if not canvas:
            return
        label = tkinter.Label(canvas, text=self.name,
                              borderwidth=2, relief="raised")
        id = canvas.create_window(x, y, window=label, anchor="nw")
        self.canvas = canvas
        self.label = label
        self.id = id
        # Pressing the label starts a drag (see press()).
        label.bind("<ButtonPress>", self.press)

    def detach(self):
        """Remove this icon from its current canvas, if any."""
        canvas = self.canvas
        if not canvas:
            return
        id = self.id
        label = self.label
        self.canvas = self.label = self.id = None
        canvas.delete(id)
        label.destroy()

    def press(self, event):
        """ButtonPress handler: kick off a drag-and-drop of this icon."""
        if dnd_start(self, event):
            # where the pointer is relative to the label widget:
            self.x_off = event.x
            self.y_off = event.y
            # where the widget is relative to the canvas:
            self.x_orig, self.y_orig = self.canvas.coords(self.id)

    def move(self, event):
        """Move the icon on its canvas so it follows the pointer."""
        x, y = self.where(self.canvas, event)
        self.canvas.coords(self.id, x, y)

    def putback(self):
        """Restore the position recorded when the drag started."""
        self.canvas.coords(self.id, self.x_orig, self.y_orig)

    def where(self, canvas, event):
        """Translate a screen event into canvas coordinates for this icon."""
        # where the corner of the canvas is relative to the screen:
        x_org = canvas.winfo_rootx()
        y_org = canvas.winfo_rooty()
        # where the pointer is relative to the canvas widget:
        x = event.x_root - x_org
        y = event.y_root - y_org
        # compensate for initial pointer offset
        return x - self.x_off, y - self.y_off

    def dnd_end(self, target, event):
        # Source-side notification that the drag finished; nothing to do.
        pass
class Tester:
    """Demo drop target: a Toplevel holding a Canvas that accepts icons."""

    def __init__(self, root):
        self.top = tkinter.Toplevel(root)
        self.canvas = tkinter.Canvas(self.top, width=100, height=100)
        self.canvas.pack(fill="both", expand=1)
        # Advertise the canvas as a dnd target (protocol attribute looked
        # up by DndHandler.on_motion).
        self.canvas.dnd_accept = self.dnd_accept

    def dnd_accept(self, source, event):
        # Accept every source; this Tester becomes the target object.
        return self

    def dnd_enter(self, source, event):
        """Drag entered: show an outline rectangle the size of the icon."""
        self.canvas.focus_set() # Show highlight border
        x, y = source.where(self.canvas, event)
        x1, y1, x2, y2 = source.canvas.bbox(source.id)
        dx, dy = x2-x1, y2-y1
        self.dndid = self.canvas.create_rectangle(x, y, x+dx, y+dy)
        self.dnd_motion(source, event)

    def dnd_motion(self, source, event):
        """Drag moved: drag the outline rectangle along with the pointer."""
        x, y = source.where(self.canvas, event)
        x1, y1, x2, y2 = self.canvas.bbox(self.dndid)
        self.canvas.move(self.dndid, x-x1, y-y1)

    def dnd_leave(self, source, event):
        """Drag left: remove the outline rectangle."""
        self.top.focus_set() # Hide highlight border
        self.canvas.delete(self.dndid)
        self.dndid = None

    def dnd_commit(self, source, event):
        """Drop: clean up the outline and re-attach the icon here."""
        self.dnd_leave(source, event)
        x, y = source.where(self.canvas, event)
        source.attach(self.canvas, x, y)
def test():
    """Interactive demo: three drop-target windows and three icons."""
    root = tkinter.Tk()
    root.geometry("+1+1")
    tkinter.Button(command=root.quit, text="Quit").pack()
    # One Tester window per position, left to right.
    testers = []
    for pos in ("+1+60", "+120+60", "+240+60"):
        tester = Tester(root)
        tester.top.geometry(pos)
        testers.append(tester)
    # Drop one icon onto each canvas to start with.
    for tester, label in zip(testers, ("ICON1", "ICON2", "ICON3")):
        Icon(label).attach(tester.canvas)
    root.mainloop()
# Manual demo entry point: run this module directly to try the
# drag-and-drop sandbox (requires a display).
if __name__ == '__main__':
    test()
| apache-2.0 |
hiatobr/midiacapoeira | modules/queries.py | 1 | 1403 | # -*- coding: utf-8 -*-
from gluon import current
def tagQuery(tags, ctbl, ttbl, query = 0, op = 'or', field =
             'texto_id'):
    '''
    Query the database for content marked with the tags in <tags>.

    The operation is recursive, tag by tag, joining the result for one
    tag with the result for the previous tag.  The join can be an
    intersection (op = 'and') or a union (op = 'or').

    The implementation is deliberately generic: it can search any kind
    of content as long as <field> names the tag-table column that
    references the content table.

    <ctbl> = content table
    <ttbl> = tag table

    NOTE: <tags> is consumed (popped) by this function; pass a copy if
    the caller still needs the list afterwards.
    '''
    db = current.db
    try:
        # Pick one tag and look up the ids of the content rows that carry it.
        tag_ref = db(ttbl.tag==tags.pop()).select(ttbl[field]).as_list()
        tag_ref = map(list.pop, map(dict.values, tag_ref))
        subquery = ctbl.id.belongs(tag_ref)
        # Bug fix: the recursive calls previously dropped <op> and <field>,
        # silently falling back to 'or'/'texto_id' after the first tag.
        # They are now forwarded so the requested semantics apply to every tag.
        if query and op == 'or':
            return tagQuery(tags, ctbl, ttbl, subquery | query,
                            op=op, field=field)
        elif query and op == 'and':
            return tagQuery(tags, ctbl, ttbl, subquery & query,
                            op=op, field=field)
        else:
            return tagQuery(tags, ctbl, ttbl, subquery, op=op, field=field)
    except IndexError:
        # All tags consumed (tags.pop() on an empty list): run the
        # accumulated query and return the matching rows.
        return db(query).select(ctbl.ALL).as_list()
| gpl-3.0 |
jsaponara/opentaxforms | opentaxforms/serve.py | 1 | 3082 | #!/usr/bin/env python
from __future__ import print_function, absolute_import
import flask_restless
from argparse import ArgumentParser
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
from .db import connect
from .version import appname, apiVersion
from .ut import Bag
def createApi(app,**kw):
    """Reflect the opentaxforms database and expose each table as a
    flask-restless REST endpoint under /api/v<apiVersion>.

    Extra keyword args are forwarded to db.connect() (e.g. postgres=True).
    Returns a dict mapping table name -> row count, for reporting.
    """
    db = SQLAlchemy(app)
    conn, engine, metadata, md = connect(appname, **kw)
    Base = declarative_base()
    Session = sessionmaker(autocommit=False, autoflush=False, bind=engine)
    mysession = scoped_session(Session)
    apimanager = flask_restless.APIManager(app, session=mysession)
    counts = {}
    for tabl in md:
        tablobj = md[tabl]
        counts[tabl] = tablobj.count().execute().fetchone()[0]
        # Build a declarative model class on the fly from the reflected table.
        attrs = dict(
            __table__=tablobj,
            # todo should flask_restless need __tablename__?
            __tablename__=str(tabl),
        )
        # Wire up the inter-table relationships (orgn 1-n form 1-n slot).
        attrs.update(dict(
            orgn=dict(
                form=db.relationship('Form'),
            ),
            form=dict(
                orgn=db.relationship('Orgn', back_populates='form'),
                slot=db.relationship('Slot', back_populates='form'),
            ),
            slot=dict(
                form=db.relationship('Form'),
            ),
        )[tabl])
        tablcls = type(str(tabl).capitalize(), (Base, ), attrs)
        # Related columns to expose alongside the table's own columns.
        colsToAdd = dict(
            orgn=(),
            form=(
                'orgn', 'orgn.code',
            ),
            slot=(
                'form', 'form.code',
            ),
        )[tabl]
        colsToShow = [c.name for c in tablobj.columns]
        colsToShow.extend(colsToAdd)
        # print tabl,colsToShow
        apimanager.create_api(
            tablcls,
            url_prefix='/api/v%s' % (apiVersion, ),
            include_columns=colsToShow,
        )
    return counts
def parseCmdline():
    '''Parse command line arguments; returns a Namespace with .postgres.'''
    argparser = ArgumentParser(
        description='Automates tax forms'
        ' and provides an API for new tax form interfaces')
    argparser.add_argument(
        '-P', '--postgres',
        help='use postgres database [default=sqlite]', action="store_true")
    return argparser.parse_args()
def createApp(**kw):
    """Build the Flask app, wiring the REST API onto it.

    Recognised kwargs (removed before forwarding the rest to createApi):
      cmdline -- if truthy, read options from sys.argv via parseCmdline()
      verbose -- if truthy, print a one-line summary of what is served
    """
    cmdline = kw.get('cmdline')
    verbose = kw.get('verbose')
    # Strip our own options so createApi only sees db-connection kwargs.
    if 'cmdline' in kw:
        del kw['cmdline']
    if 'verbose' in kw:
        del kw['verbose']
    args = parseCmdline() if cmdline else Bag(dict(postgres=False))
    app = Flask(appname)
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False  # to suppress warning
    counts = createApi(app,postgres=args.postgres, **kw)
    if verbose:
        print('serving {slot} slots in {form} forms from {orgn} orgns'.format(
            **counts))
    return app
def main(**kw):
    """Create the app against the bundled sqlite database and serve it."""
    app = createApp(dbpath='sqlite:///opentaxforms.sqlite3', **kw)
    app.run()

# Script entry point: honor command-line flags and report what is served.
if __name__ == "__main__":
    main(cmdline=True, verbose=True)
| agpl-3.0 |
chrisburr/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 73 | 1232 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
    """Vectorized modified Huber loss.

    For margin z = y_pred * y_true:
        z >= 1        -> 0
        -1 <= z < 1   -> (1 - z)^2
        z < -1        -> -4 * z
    """
    margin = y_pred * y_true
    return np.where(margin >= 1., 0.,
                    np.where(margin >= -1., (1. - margin) ** 2,
                             -4. * margin))
# Plot each convex surrogate loss as a function of the decision margin
# f(x) over [-4, 4], with the 0-1 loss as the reference.
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
lw = 2
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], color='gold', lw=lw,
         label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), color='teal', lw=lw,
         label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), color='yellowgreen', lw=lw,
         label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), color='cornflowerblue', lw=lw,
         label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, color='orange', lw=lw,
         label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), color='darkorchid', lw=lw,
         linestyle='--', label="Modified Huber loss")
# Clip the y-axis so the steep losses don't dwarf the others.
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
sijie/bookkeeper | stream/clients/python/examples/admin.py | 3 | 3472 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bookkeeper import admin
from bookkeeper import kv
from bookkeeper.common.exceptions import KeyNotFoundError
from bookkeeper.common.exceptions import NamespaceNotFoundError
ns_name = "test"
ns_name_2 = "test2"
stream_name = "test_stream"
stream_name_2 = "test_stream_2"
client = admin.Client()
try:
client.namespaces().delete(ns_name)
except NamespaceNotFoundError:
print("Namespace '%s' doesn't not exist" % ns_name)
try:
client.namespaces().delete(ns_name_2)
except NamespaceNotFoundError:
print("Namespace '%s' doesn't not exist" % ns_name_2)
# create first namespace
ns_resp = client.namespaces().create(ns_name)
print("Created first namespace '%s' : %s" % (ns_name, ns_resp))
# create second namespace
ns_resp = client.namespaces().create(ns_name_2)
print("Created second namespace '%s' : %s" % (ns_name_2, ns_resp))
# get first namespace
ns_props = client.namespaces().get(ns_name)
print("Get first namespace '%s' : %s" % (ns_name, ns_props))
ns_props = client.namespaces().get(ns_name_2)
print("Get second namespace '%s' : %s" % (ns_name_2, ns_props))
# test operations on namespace 'test'
ns = client.namespace(ns_name)
stream_resp = ns.create(stream_name)
print("Create first stream '%s' : %s" % (stream_name, stream_resp))
stream_resp = ns.create(stream_name_2)
print("Create second stream '%s' : %s" % (stream_name_2, stream_resp))
stream_props = ns.get(stream_name)
print("Get first stream '%s' : %s" % (stream_name, stream_props))
stream_props = ns.get(stream_name_2)
print("Get second stream '%s' : %s" % (stream_name_2, stream_props))
kv_client = kv.Client(namespace=ns_name)
table = kv_client.table(stream_name)
num_keys = 10
for i in range(num_keys):
put_resp = table.put_str("key-%s" % i, "value-%s" % i)
print("Successfully added %d keys" % num_keys)
for i in range(10):
get_resp = table.get_str("key-%s" % i)
print("Get response : %s" % get_resp)
print("Successfully retrieved %d keys" % num_keys)
for i in range(10):
del_resp = table.delete_str("key-%s" % i)
print("Delete response : %s" % del_resp)
print("Successfully deleted %d keys" % num_keys)
print("Try to retrieve %d keys again" % num_keys)
for i in range(10):
get_resp = table.get_str("key-%s" % i)
assert get_resp is None
print("All %d keys should not exist anymore" % num_keys)
for i in range(10):
try:
table.delete_str("key-%s" % i)
except KeyNotFoundError:
print("Key 'key-%s' doesn't exist" % i)
del_resp = ns.delete(stream_name)
print("Delete first stream '%s' : %s" % (stream_name, del_resp))
del_resp = ns.delete(stream_name_2)
print("Delete second stream '%s' : %s" % (stream_name_2, del_resp))
del_resp = client.namespaces().delete(ns_name)
print("Delete first namespace '%s' : %s" % (ns_name, del_resp))
del_resp = client.namespaces().delete(ns_name_2)
print("Delete second namespace '%s' : %s" % (ns_name_2, del_resp))
| apache-2.0 |
marxin/youtube-dl | youtube_dl/extractor/podomatic.py | 198 | 2327 | from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import int_or_none
class PodomaticIE(InfoExtractor):
    """Extractor for podomatic.com podcast episode pages."""
    IE_NAME = 'podomatic'
    # Captures the scheme, the podcast channel (subdomain) and the episode id.
    _VALID_URL = r'^(?P<proto>https?)://(?P<channel>[^.]+)\.podomatic\.com/entry/(?P<id>[^?]+)'

    _TESTS = [
        {
            'url': 'http://scienceteachingtips.podomatic.com/entry/2009-01-02T16_03_35-08_00',
            'md5': '84bb855fcf3429e6bf72460e1eed782d',
            'info_dict': {
                'id': '2009-01-02T16_03_35-08_00',
                'ext': 'mp3',
                'uploader': 'Science Teaching Tips',
                'uploader_id': 'scienceteachingtips',
                'title': '64. When the Moon Hits Your Eye',
                'duration': 446,
            }
        },
        {
            'url': 'http://ostbahnhof.podomatic.com/entry/2013-11-15T16_31_21-08_00',
            'md5': 'd2cf443931b6148e27638650e2638297',
            'info_dict': {
                'id': '2013-11-15T16_31_21-08_00',
                'ext': 'mp3',
                'uploader': 'Ostbahnhof / Techno Mix',
                'uploader_id': 'ostbahnhof',
                'title': 'Einunddreizig',
                'duration': 3799,
            }
        },
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        channel = mobj.group('channel')
        # The embed_params endpoint returns the episode metadata as JSON.
        json_url = (('%s://%s.podomatic.com/entry/embed_params/%s' +
                     '?permalink=true&rtmp=0') %
                    (mobj.group('proto'), channel, video_id))
        data_json = self._download_webpage(
            json_url, video_id, 'Downloading video info')
        data = json.loads(data_json)

        video_url = data['downloadLink']
        if not video_url:
            # No direct download: derive an HTTP URL from the RTMP streamer
            # base plus the media path.
            video_url = '%s/%s' % (data['streamer'].replace('rtmp', 'http'), data['mediaLocation'])
        uploader = data['podcast']
        title = data['title']
        thumbnail = data['imageLocation']
        # 'length' is in milliseconds; scale down to seconds.
        duration = int_or_none(data.get('length'), 1000)

        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'uploader': uploader,
            'uploader_id': channel,
            'thumbnail': thumbnail,
            'duration': duration,
        }
| unlicense |
p0psicles/SickRage | lib/rtorrent/lib/xmlrpc/http.py | 180 | 1195 | # Copyright (c) 2013 Chris Lucas, <chris@chrisjlucas.com>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from rtorrent.compat import xmlrpclib
# Plain-HTTP transport is just xmlrpclib's stock ServerProxy; re-export it
# under the name the rest of the package expects.
HTTPServerProxy = xmlrpclib.ServerProxy
| gpl-3.0 |
amyvmiwei/kbengine | kbe/res/scripts/common/Lib/distutils/dir_util.py | 7 | 7804 | """distutils.dir_util
Utility functions for manipulating directories and directory trees."""
import os
import errno
from distutils.errors import DistutilsFileError, DistutilsInternalError
from distutils import log
# cache for by mkpath() -- in addition to cheapening redundant calls,
# eliminates redundant "creating /foo/bar/baz" messages in dry-run mode
_path_created = {}
# I don't use os.makedirs because a) it's new to Python 1.5.2, and
# b) it blows up if the directory already exists (I want to silently
# succeed in that case).
def mkpath(name, mode=0o777, verbose=1, dry_run=0):
    """Create a directory and any missing ancestor directories.

    If the directory already exists (or if 'name' is the empty string, which
    means the current directory, which of course exists), then do nothing.
    Raise DistutilsFileError if unable to create some directory along the way
    (eg. some sub-path exists, but is a file rather than a directory).
    If 'verbose' is true, print a one-line summary of each mkdir to stdout.
    Return the list of directories actually created.

    Directories already created in this process are remembered in the
    module-level _path_created cache, so repeated calls are cheap and
    "creating ..." messages are not repeated under dry_run.
    """
    global _path_created

    # Detect a common bug -- name is None
    if not isinstance(name, str):
        raise DistutilsInternalError(
            "mkpath: 'name' must be a string (got %r)" % (name,))

    # XXX what's the better way to handle verbosity? print as we create
    # each directory in the path (the current behaviour), or only announce
    # the creation of the whole path? (quite easy to do the latter since
    # we're not using a recursive algorithm)

    name = os.path.normpath(name)
    created_dirs = []
    if os.path.isdir(name) or name == '':
        return created_dirs
    if _path_created.get(os.path.abspath(name)):
        return created_dirs

    # Peel path components off the end until we reach a directory that
    # already exists; 'tails' collects the components still to create.
    (head, tail) = os.path.split(name)
    tails = [tail] # stack of lone dirs to create
    while head and tail and not os.path.isdir(head):
        (head, tail) = os.path.split(head)
        tails.insert(0, tail) # push next higher dir onto stack

    # now 'head' contains the deepest directory that already exists
    # (that is, the child of 'head' in 'name' is the highest directory
    # that does *not* exist)
    for d in tails:
        #print "head = %s, d = %s: " % (head, d),
        head = os.path.join(head, d)
        abs_head = os.path.abspath(head)
        if _path_created.get(abs_head):
            continue
        if verbose >= 1:
            log.info("creating %s", head)
        if not dry_run:
            try:
                os.mkdir(head, mode)
            except OSError as exc:
                # Tolerate a concurrent creation of the same directory;
                # anything else is a hard failure.
                if not (exc.errno == errno.EEXIST and os.path.isdir(head)):
                    raise DistutilsFileError(
                        "could not create '%s': %s" % (head, exc.args[-1]))
            created_dirs.append(head)
        _path_created[abs_head] = 1
    return created_dirs
def create_tree(base_dir, files, mode=0o777, verbose=1, dry_run=0):
    """Create all the empty directories under 'base_dir' needed to put 'files'
    there.

    'base_dir' is just the a name of a directory which doesn't necessarily
    exist yet; 'files' is a list of filenames to be interpreted relative to
    'base_dir'.  'base_dir' + the directory portion of every file in 'files'
    will be created if it doesn't already exist.  'mode', 'verbose' and
    'dry_run' flags are as for 'mkpath()'.
    """
    # Collect the distinct directories needed, then create them in
    # sorted order so parents come before children.
    needed = {os.path.join(base_dir, os.path.dirname(f)) for f in files}
    for directory in sorted(needed):
        mkpath(directory, mode, verbose=verbose, dry_run=dry_run)
def copy_tree(src, dst, preserve_mode=1, preserve_times=1,
              preserve_symlinks=0, update=0, verbose=1, dry_run=0):
    """Copy an entire directory tree 'src' to a new location 'dst'.

    Both 'src' and 'dst' must be directory names.  If 'src' is not a
    directory, raise DistutilsFileError.  If 'dst' does not exist, it is
    created with 'mkpath()'.  The end result of the copy is that every
    file in 'src' is copied to 'dst', and directories under 'src' are
    recursively copied to 'dst'.  Return the list of files that were
    copied or might have been copied, using their output name.  The
    return value is unaffected by 'update' or 'dry_run': it is simply
    the list of all files under 'src', with the names changed to be
    under 'dst'.

    'preserve_mode' and 'preserve_times' are the same as for
    'copy_file'; note that they only apply to regular files, not to
    directories.  If 'preserve_symlinks' is true, symlinks will be
    copied as symlinks (on platforms that support them!); otherwise
    (the default), the destination of the symlink will be copied.
    'update' and 'verbose' are the same as for 'copy_file'.
    """
    from distutils.file_util import copy_file

    if not dry_run and not os.path.isdir(src):
        raise DistutilsFileError(
            "cannot copy tree '%s': not a directory" % src)
    try:
        names = os.listdir(src)
    except OSError as e:
        # Bug fix: this used to do ``(errno, errstr) = e``, which shadowed
        # the 'errno' module and raises TypeError on Python 3 (OSError
        # instances are not unpackable).  Use the exception's attributes.
        if dry_run:
            names = []
        else:
            raise DistutilsFileError(
                "error listing files in '%s': %s" % (src, e.strerror))

    if not dry_run:
        mkpath(dst, verbose=verbose)

    outputs = []
    for n in names:
        src_name = os.path.join(src, n)
        dst_name = os.path.join(dst, n)

        if n.startswith('.nfs'):
            # skip NFS rename files
            continue

        if preserve_symlinks and os.path.islink(src_name):
            link_dest = os.readlink(src_name)
            if verbose >= 1:
                log.info("linking %s -> %s", dst_name, link_dest)
            if not dry_run:
                os.symlink(link_dest, dst_name)
            outputs.append(dst_name)
        elif os.path.isdir(src_name):
            # Recurse into subdirectories; their outputs are flattened
            # into the overall result list.
            outputs.extend(
                copy_tree(src_name, dst_name, preserve_mode,
                          preserve_times, preserve_symlinks, update,
                          verbose=verbose, dry_run=dry_run))
        else:
            copy_file(src_name, dst_name, preserve_mode,
                      preserve_times, update, verbose=verbose,
                      dry_run=dry_run)
            outputs.append(dst_name)

    return outputs
def _build_cmdtuple(path, cmdtuples):
"""Helper for remove_tree()."""
for f in os.listdir(path):
real_f = os.path.join(path,f)
if os.path.isdir(real_f) and not os.path.islink(real_f):
_build_cmdtuple(real_f, cmdtuples)
else:
cmdtuples.append((os.remove, real_f))
cmdtuples.append((os.rmdir, path))
def remove_tree(directory, verbose=1, dry_run=0):
    """Recursively remove an entire directory tree.

    Any errors are ignored (apart from being reported to stdout if 'verbose'
    is true).

    'directory' is the root of the tree to delete (it is removed as well).
    If 'dry_run' is true, only log what would be removed.
    """
    # _path_created is the module-level cache maintained by mkpath();
    # entries for directories we delete must be evicted so the directories
    # can be re-created later.  (Presumably a dict keyed by abspath --
    # confirmed by the 'del _path_created[abspath]' below.)
    global _path_created
    if verbose >= 1:
        log.info("removing '%s' (and everything under it)", directory)
    if dry_run:
        return
    # Build the full depth-first list of (callable, path) delete operations
    # up front, then execute them in order (children before parents).
    cmdtuples = []
    _build_cmdtuple(directory, cmdtuples)
    for cmd in cmdtuples:
        try:
            cmd[0](cmd[1])
            # remove dir from cache if it's already there
            abspath = os.path.abspath(cmd[1])
            if abspath in _path_created:
                del _path_created[abspath]
        except OSError as exc:
            # Best-effort removal: report and keep going.
            log.warn("error removing %s: %s", directory, exc)
def ensure_relative(path):
    """Take the full path 'path', and make it a relative path.

    This is useful to make 'path' the second argument to os.path.join().
    """
    drive, tail = os.path.splitdrive(path)
    # Strip a single leading separator; keep the drive prefix (if any) so
    # a Windows path like 'c:\\hello' becomes the drive-relative 'c:hello'.
    if tail[:1] == os.sep:
        return drive + tail[1:]
    return tail
| lgpl-3.0 |
borosnborea/SwordGO_app | example/kivymap/.buildozer/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/pulldom.py | 1729 | 2302 | from __future__ import absolute_import, division, unicode_literals
from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \
COMMENT, IGNORABLE_WHITESPACE, CHARACTERS
from . import _base
from ..constants import voidElements
class TreeWalker(_base.TreeWalker):
    """Tree walker over an ``xml.dom.pulldom`` (event, node) stream.

    Uses one event of lookahead so that a START_ELEMENT for a void element
    can be emitted as a single EmptyTag token and the matching END_ELEMENT
    event suppressed.
    """
    def __iter__(self):
        # Node whose pending END_ELEMENT must be swallowed because it was
        # already emitted as an EmptyTag (void element).
        ignore_until = None
        # One-event lookahead buffer: tokens for ``previous`` are emitted
        # only once the following event is known.
        previous = None
        for event in self.tree:
            if previous is not None and \
                    (ignore_until is None or previous[1] is ignore_until):
                if previous[1] is ignore_until:
                    # Reached the END_ELEMENT of the void element; resume.
                    ignore_until = None
                for token in self.tokens(previous, event):
                    yield token
                    if token["type"] == "EmptyTag":
                        # Skip everything up to this node's END_ELEMENT.
                        ignore_until = previous[1]
            previous = event
        # Flush the final buffered event (``next`` is None at end of stream).
        if ignore_until is None or previous[1] is ignore_until:
            for token in self.tokens(previous, None):
                yield token
        elif ignore_until is not None:
            raise ValueError("Illformed DOM event stream: void element without END_ELEMENT")
    def tokens(self, event, next):
        """Yield html5lib tokens for a single pulldom *event*.

        ``next`` is the following (event, node) pair, or None at end of
        stream; it is used to decide whether a void element has children
        (i.e. whether its END_ELEMENT is not the immediately next event).
        """
        type, node = event
        if type == START_ELEMENT:
            name = node.nodeName
            namespace = node.namespaceURI
            attrs = {}
            # Re-key attributes as (namespaceURI, localName) per html5lib.
            for attr in list(node.attributes.keys()):
                attr = node.getAttributeNode(attr)
                attrs[(attr.namespaceURI, attr.localName)] = attr.value
            if name in voidElements:
                for token in self.emptyTag(namespace,
                                           name,
                                           attrs,
                                           not next or next[1] is not node):
                    yield token
            else:
                yield self.startTag(namespace, name, attrs)
        elif type == END_ELEMENT:
            name = node.nodeName
            namespace = node.namespaceURI
            # Void elements were already emitted as EmptyTag tokens.
            if name not in voidElements:
                yield self.endTag(namespace, name)
        elif type == COMMENT:
            yield self.comment(node.nodeValue)
        elif type in (IGNORABLE_WHITESPACE, CHARACTERS):
            for token in self.text(node.nodeValue):
                yield token
        else:
            # Unrecognized pulldom event type; let the base class report it.
            yield self.unknown(type)
| gpl-3.0 |
willingc/oh-mainline | vendor/packages/Django/tests/regressiontests/utils/text.py | 20 | 6650 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import warnings
from django.test import SimpleTestCase
from django.utils import text
class TestUtilsText(SimpleTestCase):
# In Django 1.6 truncate_words() and truncate_html_words() will be removed
# so these tests will need to be adapted accordingly
def test_truncate_chars(self):
truncator = text.Truncator(
'The quick brown fox jumped over the lazy dog.'
)
self.assertEqual('The quick brown fox jumped over the lazy dog.',
truncator.chars(100)),
self.assertEqual('The quick brown fox ...',
truncator.chars(23)),
self.assertEqual('The quick brown fo.....',
truncator.chars(23, '.....')),
# Ensure that we normalize our unicode data first
nfc = text.Truncator('o\xfco\xfco\xfco\xfc')
nfd = text.Truncator('ou\u0308ou\u0308ou\u0308ou\u0308')
self.assertEqual('oüoüoüoü', nfc.chars(8))
self.assertEqual('oüoüoüoü', nfd.chars(8))
self.assertEqual('oü...', nfc.chars(5))
self.assertEqual('oü...', nfd.chars(5))
# Ensure the final length is calculated correctly when there are
# combining characters with no precomposed form, and that combining
# characters are not split up.
truncator = text.Truncator('-B\u030AB\u030A----8')
self.assertEqual('-B\u030A...', truncator.chars(5))
self.assertEqual('-B\u030AB\u030A-...', truncator.chars(7))
self.assertEqual('-B\u030AB\u030A----8', truncator.chars(8))
# Ensure the length of the end text is correctly calculated when it
# contains combining characters with no precomposed form.
truncator = text.Truncator('-----')
self.assertEqual('---B\u030A', truncator.chars(4, 'B\u030A'))
self.assertEqual('-----', truncator.chars(5, 'B\u030A'))
# Make a best effort to shorten to the desired length, but requesting
# a length shorter than the ellipsis shouldn't break
self.assertEqual('...', text.Truncator('asdf').chars(1))
def test_truncate_words(self):
truncator = text.Truncator('The quick brown fox jumped over the lazy '
'dog.')
self.assertEqual('The quick brown fox jumped over the lazy dog.',
truncator.words(10))
self.assertEqual('The quick brown fox...', truncator.words(4))
self.assertEqual('The quick brown fox[snip]',
truncator.words(4, '[snip]'))
def test_truncate_html_words(self):
truncator = text.Truncator('<p><strong><em>The quick brown fox jumped '
'over the lazy dog.</em></strong></p>')
self.assertEqual('<p><strong><em>The quick brown fox jumped over the '
'lazy dog.</em></strong></p>', truncator.words(10, html=True))
self.assertEqual('<p><strong><em>The quick brown fox...</em>'
'</strong></p>', truncator.words(4, html=True))
self.assertEqual('<p><strong><em>The quick brown fox....</em>'
'</strong></p>', truncator.words(4, '....', html=True))
self.assertEqual('<p><strong><em>The quick brown fox</em></strong>'
'</p>', truncator.words(4, '', html=True))
# Test with new line inside tag
truncator = text.Truncator('<p>The quick <a href="xyz.html"\n'
'id="mylink">brown fox</a> jumped over the lazy dog.</p>')
self.assertEqual('<p>The quick <a href="xyz.html"\n'
'id="mylink">brown...</a></p>', truncator.words(3, '...', html=True))
def test_old_truncate_words(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.assertEqual('The quick brown fox jumped over the lazy dog.',
text.truncate_words('The quick brown fox jumped over the lazy dog.', 10))
self.assertEqual('The quick brown fox ...',
text.truncate_words('The quick brown fox jumped over the lazy dog.', 4))
self.assertEqual('The quick brown fox ....',
text.truncate_words('The quick brown fox jumped over the lazy dog.', 4, '....'))
self.assertGreater(len(w), 0)
def test_old_truncate_html_words(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.assertEqual('<p><strong><em>The quick brown fox jumped over the lazy dog.</em></strong></p>',
text.truncate_html_words('<p><strong><em>The quick brown fox jumped over the lazy dog.</em></strong></p>', 10))
self.assertEqual('<p><strong><em>The quick brown fox ...</em></strong></p>',
text.truncate_html_words('<p><strong><em>The quick brown fox jumped over the lazy dog.</em></strong></p>', 4))
self.assertEqual('<p><strong><em>The quick brown fox ....</em></strong></p>',
text.truncate_html_words('<p><strong><em>The quick brown fox jumped over the lazy dog.</em></strong></p>', 4, '....'))
self.assertEqual('<p><strong><em>The quick brown fox</em></strong></p>',
text.truncate_html_words('<p><strong><em>The quick brown fox jumped over the lazy dog.</em></strong></p>', 4, None))
self.assertGreater(len(w), 0)
def test_wrap(self):
digits = '1234 67 9'
self.assertEqual(text.wrap(digits, 100), '1234 67 9')
self.assertEqual(text.wrap(digits, 9), '1234 67 9')
self.assertEqual(text.wrap(digits, 8), '1234 67\n9')
self.assertEqual(text.wrap('short\na long line', 7),
'short\na long\nline')
self.assertEqual(text.wrap('do-not-break-long-words please? ok', 8),
'do-not-break-long-words\nplease?\nok')
long_word = 'l%sng' % ('o' * 20)
self.assertEqual(text.wrap(long_word, 20), long_word)
self.assertEqual(text.wrap('a %s word' % long_word, 10),
'a\n%s\nword' % long_word)
def test_slugify(self):
items = (
('Hello, World!', 'hello-world'),
('spam & eggs', 'spam-eggs'),
)
for value, output in items:
self.assertEqual(text.slugify(value), output)
def test_unescape_entities(self):
items = [
('', ''),
('foo', 'foo'),
('&', '&'),
('&', '&'),
('&', '&'),
('foo & bar', 'foo & bar'),
('foo & bar', 'foo & bar'),
]
for value, output in items:
self.assertEqual(text.unescape_entities(value), output)
| agpl-3.0 |
bjlemke/MINGW-packages | mingw-w64-ca-certificates/certdata2pem.py | 26 | 16059 | #!/usr/bin/python
# vim:set et sw=4:
#
# certdata2pem.py - splits certdata.txt into multiple files
#
# Copyright (C) 2009 Philipp Kern <pkern@debian.org>
# Copyright (C) 2013 Kai Engert <kaie@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301,
# USA.
import base64
import os.path
import re
import sys
import textwrap
import urllib.request, urllib.parse, urllib.error
import subprocess
objects = []
def printable_serial(obj):
    """Render an object's CKA_SERIAL_NUMBER bytes as a dotted decimal string."""
    return ".".join(str(byte) for byte in obj['CKA_SERIAL_NUMBER'])
# Dirty file parser.
in_data, in_multiline, in_obj = False, False, False
field, ftype, value, binval, obj = None, None, None, bytearray(), dict()
for line in open('certdata.txt', 'r', encoding='utf8'):
# Ignore the file header.
if not in_data:
if line.startswith('BEGINDATA'):
in_data = True
continue
# Ignore comment lines.
if line.startswith('#'):
continue
# Empty lines are significant if we are inside an object.
if in_obj and len(line.strip()) == 0:
objects.append(obj)
obj = dict()
in_obj = False
continue
if len(line.strip()) == 0:
continue
if in_multiline:
if not line.startswith('END'):
if ftype == 'MULTILINE_OCTAL':
line = line.strip()
for i in re.finditer(r'\\([0-3][0-7][0-7])', line):
integ = int(i.group(1), 8)
binval.extend((integ).to_bytes(1, sys.byteorder))
obj[field] = binval
else:
value += line
obj[field] = value
continue
in_multiline = False
continue
if line.startswith('CKA_CLASS'):
in_obj = True
line_parts = line.strip().split(' ', 2)
if len(line_parts) > 2:
field, ftype = line_parts[0:2]
value = ' '.join(line_parts[2:])
elif len(line_parts) == 2:
field, ftype = line_parts
value = None
else:
raise NotImplementedError('line_parts < 2 not supported.\n' + line)
if ftype == 'MULTILINE_OCTAL':
in_multiline = True
value = ""
binval = bytearray()
continue
obj[field] = value
if len(list(obj.items())) > 0:
objects.append(obj)
# Build up trust database.
trustmap = dict()
for obj in objects:
if obj['CKA_CLASS'] != 'CKO_NSS_TRUST':
continue
key = obj['CKA_LABEL'] + printable_serial(obj)
trustmap[key] = obj
print(" added trust", key)
# Build up cert database.
certmap = dict()
for obj in objects:
if obj['CKA_CLASS'] != 'CKO_CERTIFICATE':
continue
key = obj['CKA_LABEL'] + printable_serial(obj)
certmap[key] = obj
print(" added cert", key)
def obj_to_filename(obj):
    """Derive a filesystem-safe "label:serial" name for a parsed certdata object."""
    # Strip the surrounding quotes from the label, then substitute the
    # characters that are awkward in file names.
    raw = obj['CKA_LABEL'][1:-1]
    for old, new in (('/', '_'), (' ', '_'), ('(', '='), (')', '='), (',', '_')):
        raw = raw.replace(old, new)
    # Decode literal "\xNN" escape sequences (as written in certdata.txt)
    # into raw byte values; every other character is UTF-8 encoded as-is.
    decoded = bytearray()
    pos = 0
    end = len(raw)
    while pos < end:
        if pos < end - 3 and raw[pos] == '\\' and raw[pos + 1] == 'x':
            decoded.extend(bytes.fromhex(raw[pos + 2:pos + 4]))
            pos += 4
        else:
            decoded.extend(raw[pos].encode())
            pos += 1
    label = decoded.decode('utf-8')
    return label + ":" + printable_serial(obj)
def write_cert_ext_to_file(f, oid, value, public_key, label=None):
    """Write one p11-kit "x-certificate-extension" object stanza to *f*.

    f          -- open text file to append to.
    oid        -- dotted OID string identifying the certificate extension.
    value      -- percent-encoded DER value of the extension.
    public_key -- PEM public-key block that terminates the stanza.
    label      -- object label; when None, falls back to the label of the
                  trust object currently being processed (the module-level
                  loop variable ``tobj``), preserving the historical
                  behaviour of reading that global implicitly.
    """
    if label is None:
        # Backward-compatible fallback: the original implementation read the
        # enclosing loop variable ``tobj`` directly, which made this helper
        # silently depend on the caller's loop state.
        label = tobj['CKA_LABEL']
    f.write("[p11-kit-object-v1]\n")
    f.write("label: ")
    f.write(label)
    f.write("\n")
    f.write("class: x-certificate-extension\n")
    f.write("object-id: " + oid + "\n")
    f.write("value: \"" + value + "\"\n")
    f.write("modifiable: false\n")
    f.write(public_key)
trust_types = {
"CKA_TRUST_DIGITAL_SIGNATURE": "digital-signature",
"CKA_TRUST_NON_REPUDIATION": "non-repudiation",
"CKA_TRUST_KEY_ENCIPHERMENT": "key-encipherment",
"CKA_TRUST_DATA_ENCIPHERMENT": "data-encipherment",
"CKA_TRUST_KEY_AGREEMENT": "key-agreement",
"CKA_TRUST_KEY_CERT_SIGN": "cert-sign",
"CKA_TRUST_CRL_SIGN": "crl-sign",
"CKA_TRUST_SERVER_AUTH": "server-auth",
"CKA_TRUST_CLIENT_AUTH": "client-auth",
"CKA_TRUST_CODE_SIGNING": "code-signing",
"CKA_TRUST_EMAIL_PROTECTION": "email-protection",
"CKA_TRUST_IPSEC_END_SYSTEM": "ipsec-end-system",
"CKA_TRUST_IPSEC_TUNNEL": "ipsec-tunnel",
"CKA_TRUST_IPSEC_USER": "ipsec-user",
"CKA_TRUST_TIME_STAMPING": "time-stamping",
"CKA_TRUST_STEP_UP_APPROVED": "step-up-approved",
}
legacy_trust_types = {
"LEGACY_CKA_TRUST_SERVER_AUTH": "server-auth",
"LEGACY_CKA_TRUST_CODE_SIGNING": "code-signing",
"LEGACY_CKA_TRUST_EMAIL_PROTECTION": "email-protection",
}
legacy_to_real_trust_types = {
"LEGACY_CKA_TRUST_SERVER_AUTH": "CKA_TRUST_SERVER_AUTH",
"LEGACY_CKA_TRUST_CODE_SIGNING": "CKA_TRUST_CODE_SIGNING",
"LEGACY_CKA_TRUST_EMAIL_PROTECTION": "CKA_TRUST_EMAIL_PROTECTION",
}
openssl_trust = {
"CKA_TRUST_SERVER_AUTH": "serverAuth",
"CKA_TRUST_CLIENT_AUTH": "clientAuth",
"CKA_TRUST_CODE_SIGNING": "codeSigning",
"CKA_TRUST_EMAIL_PROTECTION": "emailProtection",
}
for tobj in objects:
if tobj['CKA_CLASS'] == 'CKO_NSS_TRUST':
key = tobj['CKA_LABEL'] + printable_serial(tobj)
print("producing trust for " + key)
trustbits = []
distrustbits = []
openssl_trustflags = []
openssl_distrustflags = []
legacy_trustbits = []
legacy_openssl_trustflags = []
for t in list(trust_types.keys()):
if t in tobj and tobj[t] == 'CKT_NSS_TRUSTED_DELEGATOR':
trustbits.append(t)
if t in openssl_trust:
openssl_trustflags.append(openssl_trust[t])
if t in tobj and tobj[t] == 'CKT_NSS_NOT_TRUSTED':
distrustbits.append(t)
if t in openssl_trust:
openssl_distrustflags.append(openssl_trust[t])
for t in list(legacy_trust_types.keys()):
if t in tobj and tobj[t] == 'CKT_NSS_TRUSTED_DELEGATOR':
real_t = legacy_to_real_trust_types[t]
legacy_trustbits.append(real_t)
if real_t in openssl_trust:
legacy_openssl_trustflags.append(openssl_trust[real_t])
if t in tobj and tobj[t] == 'CKT_NSS_NOT_TRUSTED':
raise NotImplementedError('legacy distrust not supported.\n' + line)
fname = obj_to_filename(tobj)
try:
obj = certmap[key]
except:
obj = None
# optional debug code, that dumps the parsed input to files
#fulldump = "dump-" + fname
#dumpf = open(fulldump, 'w')
#dumpf.write(str(obj));
#dumpf.write(str(tobj));
#dumpf.close();
is_legacy = 0
if 'LEGACY_CKA_TRUST_SERVER_AUTH' in tobj or 'LEGACY_CKA_TRUST_EMAIL_PROTECTION' in tobj or 'LEGACY_CKA_TRUST_CODE_SIGNING' in tobj:
is_legacy = 1
if obj == None:
raise NotImplementedError('found legacy trust without certificate.\n' + line)
legacy_fname = "legacy-default/" + fname + ".crt"
f = open(legacy_fname, 'w')
f.write("# alias=%s\n"%tobj['CKA_LABEL'])
f.write("# trust=" + " ".join(legacy_trustbits) + "\n")
if legacy_openssl_trustflags:
f.write("# openssl-trust=" + " ".join(legacy_openssl_trustflags) + "\n")
f.write("-----BEGIN CERTIFICATE-----\n")
temp_encoded_b64 = base64.b64encode(obj['CKA_VALUE'])
temp_wrapped = textwrap.wrap(temp_encoded_b64.decode(), 64)
f.write("\n".join(temp_wrapped))
f.write("\n-----END CERTIFICATE-----\n")
f.close()
if 'CKA_TRUST_SERVER_AUTH' in tobj or 'CKA_TRUST_EMAIL_PROTECTION' in tobj or 'CKA_TRUST_CODE_SIGNING' in tobj:
legacy_fname = "legacy-disable/" + fname + ".crt"
f = open(legacy_fname, 'w')
f.write("# alias=%s\n"%tobj['CKA_LABEL'])
f.write("# trust=" + " ".join(trustbits) + "\n")
if openssl_trustflags:
f.write("# openssl-trust=" + " ".join(openssl_trustflags) + "\n")
f.write("-----BEGIN CERTIFICATE-----\n")
f.write("\n".join(textwrap.wrap(base64.b64encode(obj['CKA_VALUE']), 64)))
f.write("\n-----END CERTIFICATE-----\n")
f.close()
# don't produce p11-kit output for legacy certificates
continue
pk = ''
cert_comment = ''
if obj != None:
# must extract the public key from the cert, let's use openssl
cert_fname = "cert-" + fname
fc = open(cert_fname, 'w')
fc.write("-----BEGIN CERTIFICATE-----\n")
temp_encoded_b64 = base64.b64encode(obj['CKA_VALUE'])
temp_wrapped = textwrap.wrap(temp_encoded_b64.decode(), 64)
fc.write("\n".join(temp_wrapped))
fc.write("\n-----END CERTIFICATE-----\n")
fc.close();
pk_fname = "pubkey-" + fname
fpkout = open(pk_fname, "w")
dump_pk_command = ["openssl", "x509", "-in", cert_fname, "-noout", "-pubkey"]
subprocess.call(dump_pk_command, stdout=fpkout)
fpkout.close()
with open (pk_fname, "r") as myfile:
pk=myfile.read()
# obtain certificate information suitable as a comment
comment_fname = "comment-" + fname
fcout = open(comment_fname, "w")
comment_command = ["openssl", "x509", "-in", cert_fname, "-noout", "-text"]
subprocess.call(comment_command, stdout=fcout)
fcout.close()
sed_command = ["sed", "--in-place", "s/^/#/", comment_fname]
subprocess.call(sed_command)
with open (comment_fname, "r", errors = 'replace') as myfile:
cert_comment=myfile.read()
fname += ".tmp-p11-kit"
f = open(fname, 'w')
if obj != None:
is_distrusted = False
has_server_trust = False
has_email_trust = False
has_code_trust = False
if 'CKA_TRUST_SERVER_AUTH' in tobj:
if tobj['CKA_TRUST_SERVER_AUTH'] == 'CKT_NSS_NOT_TRUSTED':
is_distrusted = True
elif tobj['CKA_TRUST_SERVER_AUTH'] == 'CKT_NSS_TRUSTED_DELEGATOR':
has_server_trust = True
if 'CKA_TRUST_EMAIL_PROTECTION' in tobj:
if tobj['CKA_TRUST_EMAIL_PROTECTION'] == 'CKT_NSS_NOT_TRUSTED':
is_distrusted = True
elif tobj['CKA_TRUST_EMAIL_PROTECTION'] == 'CKT_NSS_TRUSTED_DELEGATOR':
has_email_trust = True
if 'CKA_TRUST_CODE_SIGNING' in tobj:
if tobj['CKA_TRUST_CODE_SIGNING'] == 'CKT_NSS_NOT_TRUSTED':
is_distrusted = True
elif tobj['CKA_TRUST_CODE_SIGNING'] == 'CKT_NSS_TRUSTED_DELEGATOR':
has_code_trust = True
if is_distrusted:
trust_ext_oid = "1.3.6.1.4.1.3319.6.10.1"
trust_ext_value = "0.%06%0a%2b%06%01%04%01%99w%06%0a%01%04 0%1e%06%08%2b%06%01%05%05%07%03%04%06%08%2b%06%01%05%05%07%03%01%06%08%2b%06%01%05%05%07%03%03"
write_cert_ext_to_file(f, trust_ext_oid, trust_ext_value, pk)
trust_ext_oid = "2.5.29.37"
if has_server_trust:
if has_email_trust:
if has_code_trust:
# server + email + code
trust_ext_value = "0%2a%06%03U%1d%25%01%01%ff%04 0%1e%06%08%2b%06%01%05%05%07%03%04%06%08%2b%06%01%05%05%07%03%01%06%08%2b%06%01%05%05%07%03%03"
else:
# server + email
trust_ext_value = "0 %06%03U%1d%25%01%01%ff%04%160%14%06%08%2b%06%01%05%05%07%03%04%06%08%2b%06%01%05%05%07%03%01"
else:
if has_code_trust:
# server + code
trust_ext_value = "0 %06%03U%1d%25%01%01%ff%04%160%14%06%08%2b%06%01%05%05%07%03%01%06%08%2b%06%01%05%05%07%03%03"
else:
# server
trust_ext_value = "0%16%06%03U%1d%25%01%01%ff%04%0c0%0a%06%08%2b%06%01%05%05%07%03%01"
else:
if has_email_trust:
if has_code_trust:
# email + code
trust_ext_value = "0 %06%03U%1d%25%01%01%ff%04%160%14%06%08%2b%06%01%05%05%07%03%04%06%08%2b%06%01%05%05%07%03%03"
else:
# email
trust_ext_value = "0%16%06%03U%1d%25%01%01%ff%04%0c0%0a%06%08%2b%06%01%05%05%07%03%04"
else:
if has_code_trust:
# code
trust_ext_value = "0%16%06%03U%1d%25%01%01%ff%04%0c0%0a%06%08%2b%06%01%05%05%07%03%03"
else:
# none
trust_ext_value = "0%18%06%03U%1d%25%01%01%ff%04%0e0%0c%06%0a%2b%06%01%04%01%99w%06%0a%10"
# no 2.5.29.37 for neutral certificates
if (is_distrusted or has_server_trust or has_email_trust or has_code_trust):
write_cert_ext_to_file(f, trust_ext_oid, trust_ext_value, pk)
pk = ''
f.write("\n")
f.write("[p11-kit-object-v1]\n")
f.write("label: ");
f.write(tobj['CKA_LABEL'])
f.write("\n")
if is_distrusted:
f.write("x-distrusted: true\n")
elif has_server_trust or has_email_trust or has_code_trust:
f.write("trusted: true\n")
else:
f.write("trusted: false\n")
# requires p11-kit >= 0.23.4
f.write("nss-mozilla-ca-policy: true\n")
f.write("modifiable: false\n");
f.write("-----BEGIN CERTIFICATE-----\n")
temp_encoded_b64 = base64.b64encode(obj['CKA_VALUE'])
temp_wrapped = textwrap.wrap(temp_encoded_b64.decode(), 64)
f.write("\n".join(temp_wrapped))
f.write("\n-----END CERTIFICATE-----\n")
f.write(cert_comment)
f.write("\n")
else:
f.write("[p11-kit-object-v1]\n")
f.write("label: ");
f.write(tobj['CKA_LABEL']);
f.write("\n")
f.write("class: certificate\n")
f.write("certificate-type: x-509\n")
f.write("modifiable: false\n");
f.write("issuer: \"");
f.write(urllib.parse.quote(tobj['CKA_ISSUER']));
f.write("\"\n")
f.write("serial-number: \"");
f.write(urllib.parse.quote(tobj['CKA_SERIAL_NUMBER']));
f.write("\"\n")
if (tobj['CKA_TRUST_SERVER_AUTH'] == 'CKT_NSS_NOT_TRUSTED') or (tobj['CKA_TRUST_EMAIL_PROTECTION'] == 'CKT_NSS_NOT_TRUSTED') or (tobj['CKA_TRUST_CODE_SIGNING'] == 'CKT_NSS_NOT_TRUSTED'):
f.write("x-distrusted: true\n")
f.write("\n\n")
f.close()
print(" -> written as '%s', trust = %s, openssl-trust = %s, distrust = %s, openssl-distrust = %s" % (fname, trustbits, openssl_trustflags, distrustbits, openssl_distrustflags))
| bsd-3-clause |
lxn2/mxnet | tests/python/unittest/test_model_parallel.py | 13 | 1545 | import numpy as np
import mxnet as mx
def reldiff(a, b):
    """Return the relative L1 difference sum(|a - b|) / sum(|a|).

    Returns 0 when the arrays are identical.  When they differ but the
    reference array *a* is all zeros, returns ``np.inf`` explicitly instead
    of tripping a numpy divide-by-zero warning (the original behaviour).
    """
    diff = np.sum(np.abs(a - b))
    if diff == 0:
        return 0
    norm = np.sum(np.abs(a))
    if norm == 0:
        # a is all zeros but b is not: the relative difference is unbounded.
        return np.inf
    return diff / norm
def test_chain():
    """Check that a symbol whose ops are split across two context groups
    ('dev1'/'dev2') produces the same forward and backward results as the
    identical symbol bound on a single context.
    """
    # Number of input arrays (data1, data2).
    n = 2
    data1 = mx.sym.Variable('data1')
    data2 = mx.sym.Variable('data2')
    # First part of the graph is assigned to ctx group 'dev1' ...
    with mx.AttrScope(ctx_group='dev1'):
        net = data1 + data2
        net = net * 3
    # ... and the final addition to ctx group 'dev2', forcing a cross-group
    # copy in the execution plan.
    with mx.AttrScope(ctx_group='dev2'):
        net = net + data1
    with mx.Context(mx.cpu(0)):
        shape = (4, 5)
        arr = [mx.nd.empty(shape) for i in range(n)]
        arr_grad = [mx.nd.empty(shape) for i in range(n)]
    # exec1: model-parallel binding, groups mapped to two (cpu) devices.
    exec1 = net.bind(mx.cpu(),
                     args=arr,
                     args_grad=arr_grad,
                     group2ctx={'dev1': mx.cpu(0), 'dev2': mx.cpu(1)})
    arr[0][:] = 1.0
    arr[1][:] = 2.0
    # exec2: reference binding on a single context, with copies of the
    # same inputs/gradients so the two executors are independent.
    arr2 = [a.copyto(mx.cpu()) for a in arr]
    arr_grad2 = [a.copyto(mx.cpu()) for a in arr_grad]
    exec2 = net.bind(mx.cpu(),
                     args=arr2,
                     args_grad=arr_grad2)
    # Show the execution plan that involves copynode
    print(exec1.debug_str())
    exec1.forward()
    exec2.forward()
    # Forward outputs must agree between the split and reference plans.
    assert reldiff(exec1.outputs[0].asnumpy(), exec2.outputs[0].asnumpy()) < 1e-6
    out_grad = mx.nd.empty(shape, mx.cpu(1))
    out_grad[:] = 1.0
    exec1.backward([out_grad])
    exec2.backward([out_grad.copyto(mx.cpu())])
    # Input gradients must agree as well.
    for a, b in zip(arr_grad, arr_grad2):
        assert reldiff(a.asnumpy(), b.asnumpy()) < 1e-6
if __name__ == '__main__':
test_chain()
| apache-2.0 |
aidanlister/django | django/contrib/gis/geoip/base.py | 334 | 11859 | import os
import re
import warnings
from ctypes import c_char_p
from django.contrib.gis.geoip.libgeoip import GEOIP_SETTINGS
from django.contrib.gis.geoip.prototypes import (
GeoIP_country_code_by_addr, GeoIP_country_code_by_name,
GeoIP_country_name_by_addr, GeoIP_country_name_by_name,
GeoIP_database_info, GeoIP_delete, GeoIP_lib_version, GeoIP_open,
GeoIP_record_by_addr, GeoIP_record_by_name,
)
from django.core.validators import ipv4_re
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_bytes, force_text
# Regular expressions for recognizing the GeoIP free database editions.
free_regex = re.compile(r'^GEO-\d{3}FREE')
lite_regex = re.compile(r'^GEO-\d{3}LITE')
class GeoIPException(Exception):
pass
class GeoIP(object):
# The flags for GeoIP memory caching.
# GEOIP_STANDARD - read database from filesystem, uses least memory.
#
# GEOIP_MEMORY_CACHE - load database into memory, faster performance
# but uses more memory
#
# GEOIP_CHECK_CACHE - check for updated database. If database has been
# updated, reload filehandle and/or memory cache. This option
# is not thread safe.
#
# GEOIP_INDEX_CACHE - just cache the most frequently accessed index
# portion of the database, resulting in faster lookups than
# GEOIP_STANDARD, but less memory usage than GEOIP_MEMORY_CACHE -
# useful for larger databases such as GeoIP Organization and
# GeoIP City. Note, for GeoIP Country, Region and Netspeed
# databases, GEOIP_INDEX_CACHE is equivalent to GEOIP_MEMORY_CACHE
#
# GEOIP_MMAP_CACHE - load database into mmap shared memory ( not available
# on Windows).
GEOIP_STANDARD = 0
GEOIP_MEMORY_CACHE = 1
GEOIP_CHECK_CACHE = 2
GEOIP_INDEX_CACHE = 4
GEOIP_MMAP_CACHE = 8
cache_options = {opt: None for opt in (0, 1, 2, 4, 8)}
# Paths to the city & country binary databases.
_city_file = ''
_country_file = ''
# Initially, pointers to GeoIP file references are NULL.
_city = None
_country = None
def __init__(self, path=None, cache=0, country=None, city=None):
"""
Initializes the GeoIP object, no parameters are required to use default
settings. Keyword arguments may be passed in to customize the locations
of the GeoIP data sets.
* path: Base directory to where GeoIP data is located or the full path
to where the city or country data files (*.dat) are located.
Assumes that both the city and country data sets are located in
this directory; overrides the GEOIP_PATH settings attribute.
* cache: The cache settings when opening up the GeoIP datasets,
and may be an integer in (0, 1, 2, 4, 8) corresponding to
the GEOIP_STANDARD, GEOIP_MEMORY_CACHE, GEOIP_CHECK_CACHE,
GEOIP_INDEX_CACHE, and GEOIP_MMAP_CACHE, `GeoIPOptions` C API
settings, respectively. Defaults to 0, meaning that the data is read
from the disk.
* country: The name of the GeoIP country data file. Defaults to
'GeoIP.dat'; overrides the GEOIP_COUNTRY settings attribute.
* city: The name of the GeoIP city data file. Defaults to
'GeoLiteCity.dat'; overrides the GEOIP_CITY settings attribute.
"""
warnings.warn(
"django.contrib.gis.geoip is deprecated in favor of "
"django.contrib.gis.geoip2 and the MaxMind GeoLite2 database "
"format.", RemovedInDjango20Warning, 2
)
# Checking the given cache option.
if cache in self.cache_options:
self._cache = cache
else:
raise GeoIPException('Invalid GeoIP caching option: %s' % cache)
# Getting the GeoIP data path.
if not path:
path = GEOIP_SETTINGS.get('GEOIP_PATH')
if not path:
raise GeoIPException('GeoIP path must be provided via parameter or the GEOIP_PATH setting.')
if not isinstance(path, six.string_types):
raise TypeError('Invalid path type: %s' % type(path).__name__)
if os.path.isdir(path):
# Constructing the GeoIP database filenames using the settings
# dictionary. If the database files for the GeoLite country
# and/or city datasets exist, then try and open them.
country_db = os.path.join(path, country or GEOIP_SETTINGS.get('GEOIP_COUNTRY', 'GeoIP.dat'))
if os.path.isfile(country_db):
self._country = GeoIP_open(force_bytes(country_db), cache)
self._country_file = country_db
city_db = os.path.join(path, city or GEOIP_SETTINGS.get('GEOIP_CITY', 'GeoLiteCity.dat'))
if os.path.isfile(city_db):
self._city = GeoIP_open(force_bytes(city_db), cache)
self._city_file = city_db
elif os.path.isfile(path):
# Otherwise, some detective work will be needed to figure
# out whether the given database path is for the GeoIP country
# or city databases.
ptr = GeoIP_open(force_bytes(path), cache)
info = GeoIP_database_info(ptr)
if lite_regex.match(info):
# GeoLite City database detected.
self._city = ptr
self._city_file = path
elif free_regex.match(info):
# GeoIP Country database detected.
self._country = ptr
self._country_file = path
else:
raise GeoIPException('Unable to recognize database edition: %s' % info)
else:
raise GeoIPException('GeoIP path must be a valid file or directory.')
def __del__(self):
# Cleaning any GeoIP file handles lying around.
if GeoIP_delete is None:
return
if self._country:
GeoIP_delete(self._country)
if self._city:
GeoIP_delete(self._city)
def __repr__(self):
version = ''
if GeoIP_lib_version is not None:
version += ' [v%s]' % force_text(GeoIP_lib_version())
return '<%(cls)s%(version)s _country_file="%(country)s", _city_file="%(city)s">' % {
'cls': self.__class__.__name__,
'version': version,
'country': self._country_file,
'city': self._city_file,
}
def _check_query(self, query, country=False, city=False, city_or_country=False):
"Helper routine for checking the query and database availability."
# Making sure a string was passed in for the query.
if not isinstance(query, six.string_types):
raise TypeError('GeoIP query must be a string, not type %s' % type(query).__name__)
# Extra checks for the existence of country and city databases.
if city_or_country and not (self._country or self._city):
raise GeoIPException('Invalid GeoIP country and city data files.')
elif country and not self._country:
raise GeoIPException('Invalid GeoIP country data file: %s' % self._country_file)
elif city and not self._city:
raise GeoIPException('Invalid GeoIP city data file: %s' % self._city_file)
# Return the query string back to the caller. GeoIP only takes bytestrings.
return force_bytes(query)
def city(self, query):
"""
Returns a dictionary of city information for the given IP address or
Fully Qualified Domain Name (FQDN). Some information in the dictionary
may be undefined (None).
"""
enc_query = self._check_query(query, city=True)
if ipv4_re.match(query):
# If an IP address was passed in
return GeoIP_record_by_addr(self._city, c_char_p(enc_query))
else:
# If a FQDN was passed in.
return GeoIP_record_by_name(self._city, c_char_p(enc_query))
def country_code(self, query):
"Returns the country code for the given IP Address or FQDN."
enc_query = self._check_query(query, city_or_country=True)
if self._country:
if ipv4_re.match(query):
return GeoIP_country_code_by_addr(self._country, enc_query)
else:
return GeoIP_country_code_by_name(self._country, enc_query)
else:
return self.city(query)['country_code']
def country_name(self, query):
"Returns the country name for the given IP Address or FQDN."
enc_query = self._check_query(query, city_or_country=True)
if self._country:
if ipv4_re.match(query):
return GeoIP_country_name_by_addr(self._country, enc_query)
else:
return GeoIP_country_name_by_name(self._country, enc_query)
else:
return self.city(query)['country_name']
def country(self, query):
"""
Returns a dictionary with the country code and name when given an
IP address or a Fully Qualified Domain Name (FQDN). For example, both
'24.124.1.80' and 'djangoproject.com' are valid parameters.
"""
# Returning the country code and name
return {'country_code': self.country_code(query),
'country_name': self.country_name(query),
}
# #### Coordinate retrieval routines ####
def coords(self, query, ordering=('longitude', 'latitude')):
cdict = self.city(query)
if cdict is None:
return None
else:
return tuple(cdict[o] for o in ordering)
def lon_lat(self, query):
"Returns a tuple of the (longitude, latitude) for the given query."
return self.coords(query)
def lat_lon(self, query):
"Returns a tuple of the (latitude, longitude) for the given query."
return self.coords(query, ('latitude', 'longitude'))
def geos(self, query):
"Returns a GEOS Point object for the given query."
ll = self.lon_lat(query)
if ll:
from django.contrib.gis.geos import Point
return Point(ll, srid=4326)
else:
return None
# #### GeoIP Database Information Routines ####
@property
def country_info(self):
"Returns information about the GeoIP country database."
if self._country is None:
ci = 'No GeoIP Country data in "%s"' % self._country_file
else:
ci = GeoIP_database_info(self._country)
return ci
@property
def city_info(self):
"Returns information about the GeoIP city database."
if self._city is None:
ci = 'No GeoIP City data in "%s"' % self._city_file
else:
ci = GeoIP_database_info(self._city)
return ci
@property
def info(self):
"Returns information about the GeoIP library and databases in use."
info = ''
if GeoIP_lib_version:
info += 'GeoIP Library:\n\t%s\n' % GeoIP_lib_version()
return info + 'Country:\n\t%s\nCity:\n\t%s' % (self.country_info, self.city_info)
# #### Methods for compatibility w/the GeoIP-Python API. ####
@classmethod
def open(cls, full_path, cache):
return GeoIP(full_path, cache)
def _rec_by_arg(self, arg):
if self._city:
return self.city(arg)
else:
return self.country(arg)
region_by_addr = city
region_by_name = city
record_by_addr = _rec_by_arg
record_by_name = _rec_by_arg
country_code_by_addr = country_code
country_code_by_name = country_code
country_name_by_addr = country_name
country_name_by_name = country_name
| bsd-3-clause |
patrickm/chromium.src | tools/metrics/histograms/pretty_print.py | 2 | 6038 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pretty-prints the histograms.xml file, alphabetizing tags, wrapping text
at 80 chars, enforcing standard attribute ordering, and standardizing
indentation.
This is quite a bit more complicated than just calling tree.toprettyxml();
we need additional customization, like special attribute ordering in tags
and wrapping text nodes, so we implement our own full custom XML pretty-printer.
"""
from __future__ import with_statement
import logging
import os
import shutil
import sys
import xml.dom.minidom
import print_style
sys.path.insert(1, os.path.join(sys.path[0], '..', '..', 'python'))
from google import path_utils
# Import the metrics/common module.
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))
import diff_util
# Tags whose children we want to alphabetize. The key is the parent tag name,
# and the value is a pair of the tag name of the children we want to sort,
# and a key function that maps each child node to the desired sort key.
ALPHABETIZATION_RULES = {
  # Sort <histogram> and <enum> elements by their (case-folded) name.
  'histograms': ('histogram', lambda n: n.attributes['name'].value.lower()),
  'enums': ('enum', lambda n: n.attributes['name'].value.lower()),
  # Enum values sort numerically by their integer 'value' attribute.
  'enum': ('int', lambda n: int(n.attributes['value'].value)),
  'fieldtrials': ('fieldtrial', lambda n: n.attributes['name'].value.lower()),
  'fieldtrial': ('affected-histogram',
                 lambda n: n.attributes['name'].value.lower()),
}
class Error(Exception):
  """Raised on fatal pretty-printing problems; caught by main()."""
  pass
def unsafeAppendChild(parent, child):
  """Append child to parent's list of children, ignoring the possibility that it
  is already in another node's childNodes list. Requires that the previous
  parent of child is discarded (to avoid non-tree DOM graphs).

  This can provide a significant speedup as O(n^2) operations are removed (in
  particular, each child insertion avoids the need to traverse the old parent's
  entire list of children)."""
  # Detach first so minidom's appendChild skips the expensive removal from
  # the old parent's childNodes list, then restore the parent pointer.
  child.parentNode = None
  parent.appendChild(child)
  child.parentNode = parent
def TransformByAlphabetizing(node):
  """Transform the given XML by alphabetizing specific node types according to
  the rules in ALPHABETIZATION_RULES.

  Args:
    node: The minidom node to transform.

  Returns:
    The minidom node, with children appropriately alphabetized. Note that the
    transformation is done in-place, i.e. the original minidom tree is modified
    directly.
  """
  if node.nodeType != xml.dom.minidom.Node.ELEMENT_NODE:
    # Non-element nodes (e.g. the Document node) cannot match a rule
    # themselves; just recurse into their children.
    for c in node.childNodes: TransformByAlphabetizing(c)
    return node
  # Element node with a tag name that we alphabetize the children of?
  if node.tagName in ALPHABETIZATION_RULES:
    # Put subnodes in a list of node,key pairs to allow for custom sorting.
    subtag, key_function = ALPHABETIZATION_RULES[node.tagName]
    subnodes = []
    last_key = -1
    for c in node.childNodes:
      if (c.nodeType == xml.dom.minidom.Node.ELEMENT_NODE and
          c.tagName == subtag):
        last_key = key_function(c)
      # Subnodes that we don't want to rearrange use the last node's key,
      # so they stay in the same relative position.
      subnodes.append( (c, last_key) )
    # Sort the subnode list. list.sort is stable, so nodes sharing a key
    # (comments, text) keep their relative order next to their anchor node.
    subnodes.sort(key=lambda pair: pair[1])
    # Re-add the subnodes, transforming each recursively.
    while node.firstChild:
      node.removeChild(node.firstChild)
    for (c, _) in subnodes:
      unsafeAppendChild(node, TransformByAlphabetizing(c))
    return node
  # Recursively handle other element nodes and other node types.
  for c in node.childNodes: TransformByAlphabetizing(c)
  return node
def PrettyPrint(raw_xml):
  """Pretty-print the given XML.

  Args:
    raw_xml: The contents of the histograms XML file, as a string.

  Returns:
    The pretty-printed version.
  """
  dom = xml.dom.minidom.parseString(raw_xml)
  return print_style.GetPrintStyle().PrettyPrintNode(
      TransformByAlphabetizing(dom))
def main():
  """Load histograms.xml, pretty-print it, and write the result back.

  With --presubmit, exits non-zero when the file is not already
  pretty-printed instead of rewriting it.
  """
  logging.basicConfig(level=logging.INFO)
  presubmit = ('--presubmit' in sys.argv)
  histograms_filename = 'histograms.xml'
  histograms_backup_filename = 'histograms.before.pretty-print.xml'
  # If there is a histograms.xml in the current working directory, use that.
  # Otherwise, use the one residing in the same directory as this script.
  histograms_dir = os.getcwd()
  if not os.path.isfile(os.path.join(histograms_dir, histograms_filename)):
    histograms_dir = path_utils.ScriptDir()
  histograms_pathname = os.path.join(histograms_dir, histograms_filename)
  histograms_backup_pathname = os.path.join(histograms_dir,
                                            histograms_backup_filename)
  logging.info('Loading %s...' % os.path.relpath(histograms_pathname))
  with open(histograms_pathname, 'rb') as f:
    # Named 'raw_xml' (not 'xml') so the imported xml.dom.minidom module is
    # not shadowed inside this function.
    raw_xml = f.read()
  # Check there are no CR ('\r') characters in the file.
  if '\r' in raw_xml:
    logging.info('DOS-style line endings (CR characters) detected - these are '
                 'not allowed. Please run dos2unix %s' % histograms_filename)
    sys.exit(1)
  logging.info('Pretty-printing...')
  try:
    pretty = PrettyPrint(raw_xml)
  except Error:
    logging.error('Aborting parsing due to fatal errors.')
    sys.exit(1)
  if raw_xml == pretty:
    logging.info('%s is correctly pretty-printed.' % histograms_filename)
    sys.exit(0)
  if presubmit:
    logging.info('%s is not formatted correctly; run pretty_print.py to fix.' %
                 histograms_filename)
    sys.exit(1)
  if not diff_util.PromptUserToAcceptDiff(
      raw_xml, pretty,
      'Is the prettified version acceptable?'):
    logging.error('Aborting')
    return
  logging.info('Creating backup file %s' % histograms_backup_filename)
  shutil.move(histograms_pathname, histograms_backup_pathname)
  logging.info('Writing new %s file' % histograms_filename)
  with open(histograms_pathname, 'wb') as f:
    f.write(pretty)


if __name__ == '__main__':
  main()
| bsd-3-clause |
huawei-cloud/compass | compass/config_management/utils/config_filter.py | 4 | 3195 | """Module to filter configuration when upddating.
.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import logging
from compass.config_management.utils import config_reference
class ConfigFilter(object):
    """config filter based on allows and denies rules"""

    def __init__(self, allows=None, denies=None):
        """Constructor

        :param allows: glob path to copy to the filtered configuration.
            Defaults to ['*'] (copy everything).
        :type allows: list of str
        :param denies: glob path to remove from the filtered configuration.
            Defaults to [] (remove nothing).
        :type denies: list of str
        """
        # BUG FIX: the defaults were mutable arguments (allows=['*'],
        # denies=[]); a caller mutating them would have leaked into every
        # subsequently constructed instance. Use None sentinels instead.
        self.allows_ = ['*'] if allows is None else allows
        self.denies_ = [] if denies is None else denies
        self._is_valid()

    def __repr__(self):
        return '%s[allows=%s,denies=%s]' % (
            self.__class__.__name__, self.allows_, self.denies_)

    def _is_allows_valid(self):
        """Check if allows are valid: a list of glob-pattern strings."""
        if not isinstance(self.allows_, list):
            raise TypeError(
                'allows type is %s but expected type is list: %s' % (
                    type(self.allows_), self.allows_))
        for i, allow in enumerate(self.allows_):
            if not isinstance(allow, str):
                raise TypeError(
                    'allows[%s] type is %s but expected type is str: %s' % (
                        i, type(allow), allow))

    def _is_denies_valid(self):
        """Check if denies are valid: a list of glob-pattern strings."""
        if not isinstance(self.denies_, list):
            raise TypeError(
                'denies type is %s but expected type is list: %s' % (
                    type(self.denies_), self.denies_))
        for i, deny in enumerate(self.denies_):
            if not isinstance(deny, str):
                raise TypeError(
                    'denies[%s] type is %s but expected type is str: %s' % (
                        i, type(deny), deny))

    def _is_valid(self):
        """Check if config filter is valid."""
        self._is_allows_valid()
        self._is_denies_valid()

    def filter(self, config):
        """Filter config

        :param config: configuration to filter.
        :type config: dict

        :returns: filtered configuration as dict
        """
        # Copy the allowed subtrees first, then strip the denied ones.
        ref = config_reference.ConfigReference(config)
        filtered_ref = config_reference.ConfigReference({})
        self._filter_allows(ref, filtered_ref)
        self._filter_denies(filtered_ref)
        filtered_config = config_reference.get_clean_config(
            filtered_ref.config)
        logging.debug('filter config %s to %s', config, filtered_config)
        return filtered_config

    def _filter_allows(self, ref, filtered_ref):
        """copy ref config with the allows to filtered ref."""
        for allow in self.allows_:
            if not allow:
                continue
            for sub_key, sub_ref in ref.ref_items(allow):
                filtered_ref.setdefault(sub_key).update(sub_ref.config)

    def _filter_denies(self, filtered_ref):
        """remove config from filter_ref by denies."""
        for deny in self.denies_:
            if not deny:
                continue
            for ref_key in filtered_ref.ref_keys(deny):
                del filtered_ref[ref_key]
| apache-2.0 |
otherway/loc-spain | ___unported__/l10n_es_partner_data/__openerp__.py | 2 | 1801 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2009 Zikzakmedia S.L. (http://zikzakmedia.com) All Rights Reserved.
# Jordi Esteve <jesteve@zikzakmedia.com>
# Copyright (c) 2009 Pablo Rocandio. All Rights Reserved.
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Datos iniciales para módulo base",
"version" : "1.0",
"author" : "Pablo Rocandio, Zikzakmedia SL",
"website" : "www.zikzakmedia.com",
"category" : "Localisation/Europe",
"description": """Añade datos iniciales a las tablas:
* Canales
* Categorías de empresas""",
"license" : "AGPL-3",
"depends" : [
"base",
"crm",
],
"init_xml" : [
"data/data_partner_events.xml", # Canales de comunicación
"data/data_partner_categories.xml", # Categorías de empresas
],
"demo_xml" : [],
"update_xml" :[],
"active": False,
"installable": False
}
| agpl-3.0 |
ch1bo/lime-backend | lib/sublime/sublime_plugin.py | 9 | 3888 | import os
import os.path
import inspect
import traceback
import imp
import sublime
import sys
import importlib
class Command(object):
    """Common base for all plugin commands.

    Subclasses may override these hooks; by default every command is
    both enabled and visible.
    """

    def is_enabled(self, args=None):
        """Return True unless a subclass decides otherwise."""
        return True

    def is_visible(self, args=None):
        """Return True unless a subclass decides otherwise."""
        return True
class ApplicationCommand(Command):
    # Marker base class for application-wide commands (not bound to a
    # particular window or view).
    pass
class WindowCommand(Command):
    """Base class for commands bound to a window; subclasses implement run()."""

    def __init__(self, wnd):
        # The window this command instance operates on.
        self.window = wnd

    def run_(self, kwargs):
        """Dispatch to the subclass run(), stripping any 'event' entry.

        NOTE(review): mutates the caller's kwargs dict in place when it
        removes 'event'.
        """
        if kwargs and 'event' in kwargs:
            del kwargs['event']
        if kwargs:
            self.run(**kwargs)
        else:
            self.run()
class TextCommand(Command):
    """Base class for commands bound to a view; subclasses implement run()."""

    def __init__(self, view):
        # The view this command instance operates on.
        self.view = view

    def run__(self, edit_token, kwargs):
        """Dispatch to the subclass run(edit_token, ...), stripping 'event'.

        NOTE(review): mutates the caller's kwargs dict in place when it
        removes 'event'.
        """
        if kwargs and 'event' in kwargs:
            del kwargs['event']
        if kwargs:
            self.run(edit_token, **kwargs)
        else:
            self.run(edit_token)
class EventListener(object):
    # Marker base class; subclasses provide callbacks (e.g. on_query_context,
    # on_load) that reload_plugin() discovers via getattr.
    pass
def fn(fullname):
    """Resolve a dotted module name to a filesystem path via sys.path.

    Returns the first matching directory (a package) or ``.py`` file,
    or None when nothing on sys.path matches.
    """
    relpath = "/".join(fullname.split("."))
    for base in sys.path:
        candidate = os.path.abspath(os.path.join(base, relpath))
        # A bare directory match wins over the .py file of the same name.
        if os.path.exists(candidate):
            return candidate
        candidate += ".py"
        if os.path.exists(candidate):
            return candidate
    return None
class __myfinder:
    # Import hook that serves lime modules from the filesystem paths
    # resolved by fn() above.
    class myloader(object):
        def load_module(self, fullname):
            # Honor the module cache first, as the import protocol requires.
            if fullname in sys.modules:
                return sys.modules[fullname]
            f = fn(fullname)
            if not f.endswith(".py"):
                # A directory: synthesize an empty package module whose
                # __path__ points at it.
                print("new module: %s" % f)
                m = imp.new_module(fullname)
                m.__path__ = f
                sys.modules[fullname] = m
                return m
            return imp.load_source(fullname, f)

    def find_module(self, fullname, path=None):
        # Only claim modules that live under a lime checkout; everything
        # else falls through to the normal import machinery.
        f = fn(fullname)
        if f and "/lime/" in f: # TODO
            return self.myloader()

# Register the finder so subsequent imports consult it.
sys.meta_path.append(__myfinder())
def reload_plugin(module):
    """Import the named plugin module and register everything it defines.

    EventListener subclasses get their callbacks wired up; Text/Window/
    ApplicationCommand subclasses are registered under a snake_case
    command name derived from the class name. Finally the module's
    optional plugin_loaded() hook is invoked.
    """
    def cmdname(name):
        """Map a CamelCase class name ('FooBarCommand') to its command
        name ('foo_bar'); a trailing 'Command' suffix is dropped."""
        if name.endswith("Command"):
            name = name[:-7]
        ret = ""
        for c in name:
            l = c.lower()
            if c != l and len(ret) > 0:
                ret += "_"
            ret += l
        return ret
    print("Loading plugin %s" % module)
    try:
        module = importlib.import_module(module)
        for item in inspect.getmembers(module):
            if not isinstance(item[1], type(EventListener)):
                continue
            # BUG FIX: compute the command name before entering the try
            # block. Previously 'cmd' was assigned inside the try, so the
            # except handler could reference an unbound (NameError) or
            # stale value from a prior iteration if cmdname() raised.
            cmd = cmdname(item[0])
            try:
                if issubclass(item[1], EventListener):
                    inst = item[1]()
                    toadd = getattr(inst, "on_query_context", None)
                    if toadd:
                        sublime.OnQueryContextGlue(toadd)
                    for name in ["on_load"]: # TODO
                        toadd = getattr(inst, name, None)
                        if toadd:
                            sublime.ViewEventGlue(toadd, name)
                elif issubclass(item[1], TextCommand):
                    sublime.register(cmd, sublime.TextCommandGlue(item[1]))
                elif issubclass(item[1], WindowCommand):
                    sublime.register(cmd, sublime.WindowCommandGlue(item[1]))
                elif issubclass(item[1], ApplicationCommand):
                    sublime.register(cmd, sublime.ApplicationCommandGlue(item[1]))
            except:
                print("Skipping registering %s: %s" % (cmd, sys.exc_info()[1]))
        if "plugin_loaded" in dir(module):
            module.plugin_loaded()
    except:
        traceback.print_exc()
class MyLogger:
    """Stream replacement that routes completed lines to sublime.console.

    Writes are buffered in ``data``; once a write ends with a newline,
    the buffered line (without the newline) is flushed to the console.
    """

    def __init__(self):
        self.data = ""

    def flush(self):
        """Send the buffered text to the console and reset the buffer."""
        sublime.console(self.data)
        self.data = ""

    def write(self, text):
        """Accumulate ``text``; flush when a full line has been written."""
        self.data += str(text)
        if text.endswith("\n"):
            # Drop the trailing newline before handing off the line.
            self.data = self.data[:-1]
            self.flush()
# Route stdout/stderr through MyLogger so print() output and tracebacks
# appear in the sublime console.
sys.stdout = sys.stderr = MyLogger()
| bsd-2-clause |
bbglab/wok | wok/core/flow/reader.py | 1 | 6997 | ###############################################################################
#
# Copyright 2009-2011, Universitat Pompeu Fabra
#
# This file is part of Wok.
#
# Wok is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Wok is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses
#
###############################################################################
import os.path
try:
from lxml import etree
except ImportError:
try:
# Python 2.5
import xml.etree.cElementTree as etree
except ImportError:
try:
# Python 2.5+
import xml.etree.ElementTree as etree
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree
except ImportError:
import sys
sys.stderr.write("Failed to import ElementTree from any known place\n")
raise
from wok.config.data import DataElement, Data
from wok.core.flow.model import *
def str_to_bool(s):
    """Map a boolean-ish string to a bool.

    '1', 'yes' and 'true' (lowercase only) yield True; every other
    value -- including '0'/'no'/'false' and unrecognized strings --
    yields False.
    """
    return s in ("1", "yes", "true")
class FlowReader(object):
    """Parses a workflow definition XML document into flow model objects."""

    def __init__(self, source):
        """:param source: path to an XML file or an open file-like object."""
        if isinstance(source, basestring):
            self.path = os.path.abspath(source)
            self.fp = open(source, "r")
        else:
            self.path = None
            self.fp = source
        self.__doc = None

    def __read_doc(self):
        # Parse lazily and cache, so read_meta() and read() can both be
        # called without re-parsing the document.
        if self.__doc is None:
            self.__doc = etree.parse(self.fp)
        return self.__doc

    def read_meta(self):
        """Return the (name, library, version) attributes of the <flow> root."""
        doc = self.__read_doc()
        root = doc.getroot()
        if root.tag != "flow":
            # BUG FIX: the message previously referenced the undefined name
            # 'xmle', raising a NameError instead of this exception.
            raise Exception("<flow> expected but <{}> found".format(root.tag))
        name = root.attrib.get("name")
        library = root.attrib.get("library")
        version = root.attrib.get("version")
        return (name, library, version)

    def read(self):
        """Parse the whole document and return the resulting Flow."""
        doc = self.__read_doc()
        root = doc.getroot()
        flow = self._parse_flow(root)
        if self.path:
            flow.path = self.path
        return flow

    def _parse_base_desc(self, xmle, obj):
        """Fill the common name/title/desc/enabled attributes of obj."""
        if "name" not in xmle.attrib:
            raise Exception("'name' attribute not found in tag <{}>".format(xmle.tag))
        obj.name = xmle.attrib["name"]
        obj.title = xmle.findtext("title")
        obj.desc = xmle.findtext("desc")
        # BUG FIX: previously tested '"enabled" in xmle' (which checks child
        # elements, not attributes) and then read the non-existent attribute
        # 'xmle.attr', so the 'enabled' flag was never honored.
        if "enabled" in xmle.attrib:
            obj.enabled = str_to_bool(xmle.attrib["enabled"])

    def _parse_base_port(self, xmle, obj):
        """Fill attributes shared by ports and modules (serializer, wsize)."""
        self._parse_base_desc(xmle, obj)
        if "serializer" in xmle.attrib:
            obj.serializer = xmle.attrib["serializer"]
        if "wsize" in xmle.attrib:
            try:
                obj.wsize = int(xmle.attrib["wsize"])
            except:
                raise Exception("At {} {}: 'wsize' should be a number greater than 0".format(xmle.tag, obj.name))
            if obj.wsize < 1:
                raise Exception("At {} {}: 'wsize' should be a number greater than 0".format(xmle.tag, obj.name))

    def _parse_base_module(self, xmle, obj):
        """Fill module-level attributes: maxpar, conf, resources, params, ports."""
        self._parse_base_port(xmle, obj)
        if "maxpar" in xmle.attrib:
            try:
                obj.maxpar = int(xmle.attrib["maxpar"])
            except:
                raise Exception("At {} {}: 'maxpar' should be a number greater than 0".format(xmle.tag, obj.name))
            if obj.maxpar < 1:
                raise Exception("At {} {}: 'maxpar' should be a number greater than 0".format(xmle.tag, obj.name))
        conf_xml = xmle.find("conf")
        if conf_xml is not None:
            obj.conf = self._parse_conf(conf_xml)
        res_xml = xmle.find("resources")
        if res_xml is not None:
            obj.resources = self._parse_conf(res_xml)
        for x in xmle.findall("param"):
            obj.params += [self._parse_param(x)]
        for x in xmle.findall("in"):
            obj.add_in_port(self._parse_port(x))
        for x in xmle.findall("out"):
            obj.add_out_port(self._parse_port(x))

    def _parse_flow(self, xmle):
        """Parse a <flow> element and all its <module> children."""
        if xmle.tag != "flow":
            raise Exception("<flow> expected but <{}> found".format(xmle.tag))
        flow = Flow(name = None)
        self._parse_base_module(xmle, flow)
        if "library" in xmle.attrib:
            flow.library = xmle.attrib["library"]
        if "version" in xmle.attrib:
            flow.version = xmle.attrib["version"]
        # Use a distinct loop variable; the original shadowed 'xmle'.
        for mod_xml in xmle.findall("module"):
            module = self._parse_module(flow, mod_xml)
            # TODO check that there is no other module with the same name
            flow.add_module(module)
        return flow

    def _parse_module(self, flow, xmle):
        """Parse a <module>, which must carry <exec>, <run> or <flow>."""
        mod = Module(name = None)
        self._parse_base_module(xmle, mod)
        if "depends" in xmle.attrib:
            depends = [d.strip() for d in xmle.attrib["depends"].split(",")]
            mod.depends = [d for d in depends if len(d) > 0]
        exec_xml = xmle.find("exec")
        if exec_xml is None:
            run_xml = xmle.find("run")
            if run_xml is None:
                flow_ref_xml = xmle.find("flow")
                if flow_ref_xml is None:
                    raise Exception("Missing either <exec>, <run> or <flow> in module {}".format(mod.name))
                else:
                    mod.flow_ref = self._parse_flow_ref(flow, mod, flow_ref_xml)
            else:
                mod.execution = self._parse_run(mod, run_xml)
        else:
            mod.execution = self._parse_exec(exec_xml)
        return mod

    def _parse_param(self, xmle):
        # <param> parsing has not been implemented yet.
        raise Exception("Unimplemented")

    def _parse_port(self, xmle):
        """Parse an <in> or <out> element into a Port."""
        if xmle.tag == "in":
            mode = PORT_MODE_IN
        elif xmle.tag == "out":
            mode = PORT_MODE_OUT
        else:
            # ROBUSTNESS: previously an unexpected tag left 'mode' unbound
            # and produced a confusing NameError below.
            raise Exception("<in> or <out> expected but <{}> found".format(xmle.tag))
        port = Port(name = None, mode = mode)
        self._parse_base_port(xmle, port)
        if "link" in xmle.attrib:
            link = [x.strip() for x in xmle.attrib["link"].split(",")]
            port.link = [l for l in link if len(l) > 0]
        return port

    def _parse_conf(self, xmle):
        """Parse a configuration subtree into a Data object."""
        return Data.from_xmle(xmle)

    def _parse_exec(self, xmle):
        """Parse an <exec> element into an Exec description."""
        execution = Exec()
        if "launcher" in xmle.attrib:
            execution.mode = xmle.attrib["launcher"].lower()
            # 'python' is a legacy alias for the native launcher.
            if execution.mode == "python":
                execution.mode = "native"
        execution.conf = Data.from_xmle(xmle)
        return execution

    def _parse_run(self, mod, xmle):
        """Parse a <run> element (a native script reference) into an Exec."""
        if xmle.text is None or len(xmle.text) == 0:
            raise Exception("Missing script name for <run> in module {}".format(mod.name))
        execution = Exec()
        execution.mode = "native"
        execution.conf = DataElement()
        execution.conf["script_path"] = xmle.text
        return execution

    def _parse_flow_ref(self, flow, mod, xmle):
        """Parse a <flow> reference, qualifying it with the parent library."""
        if xmle.text is None or len(xmle.text) == 0:
            raise Exception("Missing flow name for <flow> in module {}".format(mod.name))
        flow_ref = FlowRef()
        pos = xmle.text.rfind(".")
        if pos == -1 and flow.library is not None:
            # Unqualified names inherit the enclosing flow's library.
            flow_ref.canonical_name = "{}.{}".format(flow.library, xmle.text)
        else:
            flow_ref.canonical_name = xmle.text
        if "version" in xmle.attrib:
            flow_ref.version = xmle.attrib["version"]
        return flow_ref

    def close(self):
        """Close the underlying file object."""
        self.fp.close()
| gpl-3.0 |
mzizzi/ansible | lib/ansible/modules/cloud/azure/azure_rm_securitygroup.py | 4 | 27361 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module metadata consumed by Ansible's documentation and QA tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: azure_rm_securitygroup
version_added: "2.1"
short_description: Manage Azure network security groups.
description:
- Create, update or delete a network security group. A security group contains Access Control List (ACL) rules
that allow or deny network traffic to subnets or individual network interfaces. A security group is created
with a set of default security rules and an empty set of security rules. Shape traffic flow by adding
rules to the empty set of security rules.
options:
default_rules:
description:
- The set of default rules automatically added to a security group at creation. In general default
rules will not be modified. Modify rules to shape the flow of traffic to or from a subnet or NIC. See
rules below for the makeup of a rule dict.
required: false
default: null
location:
description:
- Valid azure location. Defaults to location of the resource group.
default: resource_group location
required: false
name:
description:
- Name of the security group to operate on.
required: false
default: null
purge_default_rules:
description:
- Remove any existing rules not matching those defined in the default_rules parameter.
default: false
required: false
purge_rules:
description:
- Remove any existing rules not matching those defined in the rules parameters.
default: false
required: false
resource_group:
description:
- Name of the resource group the security group belongs to.
required: true
rules:
description:
- Set of rules shaping traffic flow to or from a subnet or NIC. Each rule is a dictionary.
required: false
default: null
suboptions:
name:
description:
- Unique name for the rule.
required: true
description:
description:
- Short description of the rule's purpose.
protocol:
description: Accepted traffic protocol.
choices:
- Udp
- Tcp
- "*"
default: "*"
source_port_range:
description:
- Port or range of ports from which traffic originates.
default: "*"
destination_port_range:
description:
- Port or range of ports to which traffic is headed.
default: "*"
source_address_prefix:
description:
- IP address or CIDR from which traffic originates.
default: "*"
destination_address_prefix:
description:
- IP address or CIDR to which traffic is headed.
default: "*"
access:
description:
- Whether or not to allow the traffic flow.
choices:
- Allow
- Deny
default: Allow
priority:
description:
- Order in which to apply the rule. Must a unique integer between 100 and 4096 inclusive.
required: true
direction:
description:
- Indicates the direction of the traffic flow.
choices:
- Inbound
- Outbound
default: Inbound
state:
description:
- Assert the state of the security group. Set to 'present' to create or update a security group. Set to
'absent' to remove a security group.
default: present
required: false
choices:
- absent
- present
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
# Create a security group
- azure_rm_securitygroup:
resource_group: mygroup
name: mysecgroup
purge_rules: yes
rules:
- name: DenySSH
protocol: TCP
destination_port_range: 22
access: Deny
priority: 100
direction: Inbound
- name: 'AllowSSH'
protocol: TCP
source_address_prefix: '174.109.158.0/24'
destination_port_range: 22
access: Allow
priority: 101
direction: Inbound
# Update rules on existing security group
- azure_rm_securitygroup:
resource_group: mygroup
name: mysecgroup
rules:
- name: DenySSH
protocol: TCP
destination_port_range: 22-23
access: Deny
priority: 100
direction: Inbound
- name: AllowSSHFromHome
protocol: TCP
source_address_prefix: '174.109.158.0/24'
destination_port_range: 22-23
access: Allow
priority: 102
direction: Inbound
tags:
testing: testing
delete: on-exit
# Delete security group
- azure_rm_securitygroup:
resource_group: mygroup
name: mysecgroup
state: absent
'''
RETURN = '''
state:
description: Current state of the security group.
returned: always
type: dict
sample: {
"default_rules": [
{
"access": "Allow",
"description": "Allow inbound traffic from all VMs in VNET",
"destination_address_prefix": "VirtualNetwork",
"destination_port_range": "*",
"direction": "Inbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowVnetInBound",
"name": "AllowVnetInBound",
"priority": 65000,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "VirtualNetwork",
"source_port_range": "*"
},
{
"access": "Allow",
"description": "Allow inbound traffic from azure load balancer",
"destination_address_prefix": "*",
"destination_port_range": "*",
"direction": "Inbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowAzureLoadBalancerInBound",
"name": "AllowAzureLoadBalancerInBound",
"priority": 65001,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "AzureLoadBalancer",
"source_port_range": "*"
},
{
"access": "Deny",
"description": "Deny all inbound traffic",
"destination_address_prefix": "*",
"destination_port_range": "*",
"direction": "Inbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/DenyAllInBound",
"name": "DenyAllInBound",
"priority": 65500,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "*",
"source_port_range": "*"
},
{
"access": "Allow",
"description": "Allow outbound traffic from all VMs to all VMs in VNET",
"destination_address_prefix": "VirtualNetwork",
"destination_port_range": "*",
"direction": "Outbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowVnetOutBound",
"name": "AllowVnetOutBound",
"priority": 65000,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "VirtualNetwork",
"source_port_range": "*"
},
{
"access": "Allow",
"description": "Allow outbound traffic from all VMs to Internet",
"destination_address_prefix": "Internet",
"destination_port_range": "*",
"direction": "Outbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowInternetOutBound",
"name": "AllowInternetOutBound",
"priority": 65001,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "*",
"source_port_range": "*"
},
{
"access": "Deny",
"description": "Deny all outbound traffic",
"destination_address_prefix": "*",
"destination_port_range": "*",
"direction": "Outbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/DenyAllOutBound",
"name": "DenyAllOutBound",
"priority": 65500,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "*",
"source_port_range": "*"
}
],
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup",
"location": "westus",
"name": "mysecgroup",
"network_interfaces": [],
"rules": [
{
"access": "Deny",
"description": null,
"destination_address_prefix": "*",
"destination_port_range": "22",
"direction": "Inbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/securityRules/DenySSH",
"name": "DenySSH",
"priority": 100,
"protocol": "Tcp",
"provisioning_state": "Succeeded",
"source_address_prefix": "*",
"source_port_range": "*"
},
{
"access": "Allow",
"description": null,
"destination_address_prefix": "*",
"destination_port_range": "22",
"direction": "Inbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/securityRules/AllowSSH",
"name": "AllowSSH",
"priority": 101,
"protocol": "Tcp",
"provisioning_state": "Succeeded",
"source_address_prefix": "174.109.158.0/24",
"source_port_range": "*"
}
],
"subnets": [],
"tags": {
"delete": "on-exit",
"foo": "bar",
"testing": "testing"
},
"type": "Microsoft.Network/networkSecurityGroups"
}
''' # NOQA
from ansible.module_utils.basic import *
from ansible.module_utils.azure_rm_common import *
try:
from msrestazure.azure_exceptions import CloudError
from azure.common import AzureHttpError
from azure.mgmt.network.models import NetworkSecurityGroup, SecurityRule
from azure.mgmt.network.models.network_management_client_enums import (SecurityRuleAccess,
SecurityRuleDirection,
SecurityRuleProtocol)
except ImportError:
# This is handled in azure_rm_common
pass
def validate_rule(rule, rule_type=None):
    '''
    Apply defaults to a rule dictionary and check that all values are valid.

    The rule dict is mutated in place: missing optional fields are filled
    with their defaults ('*', 'Allow', 'Inbound').

    :param rule: rule dict
    :param rule_type: Set to 'default' if the rule is part of the default set of rules.
    :return: None
    '''
    if not rule.get('name'):
        raise Exception("Rule name value is required.")
    priority = rule.get('priority', None)
    if not priority:
        raise Exception("Rule priority is required.")
    # Python 2 only: 'long' accepts large integer literals too.
    if not isinstance(priority, (int, long)):
        raise Exception("Rule priority attribute must be an integer.")
    # Azure's built-in default rules use priorities outside 100-4096, so the
    # range check is skipped for rule_type == 'default'.
    if rule_type != 'default' and (priority < 100 or priority > 4096):
        raise Exception("Rule priority must be between 100 and 4096")
    if not rule.get('access'):
        rule['access'] = 'Allow'
    # Valid values come straight from the Azure SDK enumerations.
    access_names = [member.value for member in SecurityRuleAccess]
    if rule['access'] not in access_names:
        raise Exception("Rule access must be one of [{0}]".format(', '.join(access_names)))
    if not rule.get('destination_address_prefix'):
        rule['destination_address_prefix'] = '*'
    if not rule.get('source_address_prefix'):
        rule['source_address_prefix'] = '*'
    if not rule.get('protocol'):
        rule['protocol'] = '*'
    protocol_names = [member.value for member in SecurityRuleProtocol]
    if rule['protocol'] not in protocol_names:
        raise Exception("Rule protocol must be one of [{0}]".format(', '.join(protocol_names)))
    if not rule.get('direction'):
        rule['direction'] = 'Inbound'
    direction_names = [member.value for member in SecurityRuleDirection]
    if rule['direction'] not in direction_names:
        raise Exception("Rule direction must be one of [{0}]".format(', '.join(direction_names)))
    if not rule.get('source_port_range'):
        rule['source_port_range'] = '*'
    if not rule.get('destination_port_range'):
        rule['destination_port_range'] = '*'
def compare_rules(r, rule):
    """Compare an existing rule dict *r* against a desired *rule*.

    When the names match, *r* is updated in place with any differing field
    values from *rule*.  Returns a (matched, changed) pair: ``matched`` is
    True when the rule names are equal, ``changed`` is True when at least
    one field of *r* was rewritten.
    """
    # Rules are identified purely by name; different names mean no match
    # and no mutation at all.
    if r['name'] != rule['name']:
        return False, False

    modified = False
    if rule.get('description', None) != r['description']:
        modified = True
        r['description'] = rule['description']
    # These fields are compared verbatim.
    for field in ('protocol', 'access', 'priority', 'direction'):
        if rule[field] != r[field]:
            modified = True
            r[field] = rule[field]
    # Port ranges are normalized to strings so 80 and '80' compare equal.
    for field in ('source_port_range', 'destination_port_range'):
        if str(rule[field]) != str(r[field]):
            modified = True
            r[field] = str(rule[field])
    return True, modified
def create_rule_instance(rule):
    '''
    Create an instance of SecurityRule from a dict.

    The five positional arguments are required; everything else is passed
    as keyword arguments, defaulting to None when absent from the dict.

    :param rule: dict
    :return: SecurityRule
    '''
    optional = dict(
        id=rule.get('id', None),
        description=rule.get('description', None),
        source_port_range=rule.get('source_port_range', None),
        destination_port_range=rule.get('destination_port_range', None),
        priority=rule.get('priority', None),
        provisioning_state=rule.get('provisioning_state', None),
        name=rule.get('name', None),
        etag=rule.get('etag', None),
    )
    return SecurityRule(rule['protocol'],
                        rule['source_address_prefix'],
                        rule['destination_address_prefix'],
                        rule['access'],
                        rule['direction'],
                        **optional)
from types import SimpleNamespace  # noqa: F401 (used by tests)


def create_rule_dict_from_obj(rule):
    '''
    Create a dict from an instance of a SecurityRule.

    :param rule: SecurityRule
    :return: dict keyed by the rule's attribute names
    '''
    # Same keys, in the same spirit, as the original explicit dict() call;
    # pulled from a tuple so the attribute list is stated exactly once.
    attrs = ('id', 'name', 'description', 'protocol', 'source_port_range',
             'destination_port_range', 'source_address_prefix',
             'destination_address_prefix', 'access', 'priority',
             'direction', 'provisioning_state', 'etag')
    return dict((attr, getattr(rule, attr)) for attr in attrs)
def create_network_security_group_dict(nsg):
    """Serialize a NetworkSecurityGroup object into a plain dict.

    Rule objects are flattened with create_rule_dict_from_obj(); attached
    network interfaces and subnets are reduced to their resource ids.
    Missing (None) collections become empty lists.
    """
    results = dict(
        id=nsg.id,
        name=nsg.name,
        type=nsg.type,
        location=nsg.location,
        tags=nsg.tags,
    )
    results['rules'] = [create_rule_dict_from_obj(rule)
                        for rule in (nsg.security_rules or [])]
    results['default_rules'] = [create_rule_dict_from_obj(rule)
                                for rule in (nsg.default_security_rules or [])]
    results['network_interfaces'] = [iface.id
                                     for iface in (nsg.network_interfaces or [])]
    results['subnets'] = [subnet.id for subnet in (nsg.subnets or [])]
    return results
class AzureRMSecurityGroup(AzureRMModuleBase):
    """Ansible module implementation: create, update or delete an Azure
    network security group (NSG) and reconcile its security rules."""

    def __init__(self):

        self.module_arg_spec = dict(
            default_rules=dict(type='list'),
            location=dict(type='str'),
            name=dict(type='str', required=True),
            purge_default_rules=dict(type='bool', default=False),
            purge_rules=dict(type='bool', default=False),
            resource_group=dict(required=True, type='str'),
            rules=dict(type='list'),
            state=dict(type='str', default='present', choices=['present', 'absent']),
        )

        self.default_rules = None
        self.location = None
        self.name = None
        self.purge_default_rules = None
        self.purge_rules = None
        self.resource_group = None
        self.rules = None
        self.state = None
        self.tags = None

        self.results = dict(
            changed=False,
            state=dict()
        )

        super(AzureRMSecurityGroup, self).__init__(self.module_arg_spec,
                                                   supports_check_mode=True)

    def exec_module(self, **kwargs):
        """Reconcile the requested NSG state against Azure.

        Returns the results dict (``changed`` flag plus the serialized
        group state) expected by AzureRMModuleBase.
        """
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            setattr(self, key, kwargs[key])

        changed = False
        results = dict()

        resource_group = self.get_resource_group(self.resource_group)
        if not self.location:
            # Set default location from the resource group.
            self.location = resource_group.location

        # Validate all requested rules up front so bad input fails before
        # any call that could modify cloud state.
        if self.rules:
            for rule in self.rules:
                try:
                    validate_rule(rule)
                except Exception as exc:
                    self.fail("Error validating rule {0} - {1}".format(rule, str(exc)))

        if self.default_rules:
            for rule in self.default_rules:
                try:
                    validate_rule(rule, 'default')
                except Exception as exc:
                    self.fail("Error validating default rule {0} - {1}".format(rule, str(exc)))

        try:
            nsg = self.network_client.network_security_groups.get(self.resource_group, self.name)
            results = create_network_security_group_dict(nsg)
            self.log("Found security group:")
            self.log(results, pretty_print=True)
            self.check_provisioning_state(nsg, self.state)
            if self.state == 'present':
                pass
            elif self.state == 'absent':
                self.log("CHANGED: security group found but state is 'absent'")
                changed = True
        except CloudError:
            # Group does not exist; only a problem if it should.
            if self.state == 'present':
                self.log("CHANGED: security group not found and state is 'present'")
                changed = True

        if self.state == 'present' and not changed:
            # update the security group
            self.log("Update security group {0}".format(self.name))

            if self.rules:
                for rule in self.rules:
                    rule_matched = False
                    for r in results['rules']:
                        # compare_rules() mutates r in place when names match.
                        # Use a separate local for its changed flag so a later
                        # unchanged rule cannot clobber the accumulated
                        # 'changed' value.
                        match, rule_changed = compare_rules(r, rule)
                        if rule_changed:
                            changed = True
                        if match:
                            rule_matched = True

                    if not rule_matched:
                        changed = True
                        results['rules'].append(rule)

            if self.purge_rules:
                # Keep only existing rules whose name appears in the request.
                new_rules = []
                for rule in results['rules']:
                    for r in self.rules:
                        if rule['name'] == r['name']:
                            new_rules.append(rule)
                results['rules'] = new_rules

            if self.default_rules:
                for rule in self.default_rules:
                    rule_matched = False
                    for r in results['default_rules']:
                        match, rule_changed = compare_rules(r, rule)
                        if rule_changed:
                            changed = True
                        if match:
                            rule_matched = True

                    if not rule_matched:
                        changed = True
                        results['default_rules'].append(rule)

            if self.purge_default_rules:
                new_default_rules = []
                for rule in results['default_rules']:
                    for r in self.default_rules:
                        if rule['name'] == r['name']:
                            new_default_rules.append(rule)
                results['default_rules'] = new_default_rules

            update_tags, results['tags'] = self.update_tags(results['tags'])
            if update_tags:
                changed = True

            self.results['changed'] = changed
            self.results['state'] = results
            if not self.check_mode:
                self.results['state'] = self.create_or_update(results)

        elif self.state == 'present' and changed:
            # create the security group
            self.log("Create security group {0}".format(self.name))

            if not self.location:
                self.fail("Parameter error: location required when creating a security group.")

            results['name'] = self.name
            results['location'] = self.location
            results['rules'] = []
            results['default_rules'] = []
            results['tags'] = {}

            if self.rules:
                results['rules'] = self.rules
            if self.default_rules:
                results['default_rules'] = self.default_rules
            if self.tags:
                results['tags'] = self.tags

            self.results['changed'] = changed
            self.results['state'] = results
            if not self.check_mode:
                self.results['state'] = self.create_or_update(results)

        elif self.state == 'absent' and changed:
            self.log("Delete security group {0}".format(self.name))
            self.results['changed'] = changed
            self.results['state'] = dict()
            if not self.check_mode:
                self.delete()
                # the delete does not actually return anything. if no exception, then we'll assume
                # it worked.
                self.results['state']['status'] = 'Deleted'

        return self.results

    def create_or_update(self, results):
        """Send the desired state to Azure and return the resulting group
        serialized with create_network_security_group_dict()."""
        parameters = NetworkSecurityGroup()
        if results.get('rules'):
            parameters.security_rules = []
            for rule in results.get('rules'):
                parameters.security_rules.append(create_rule_instance(rule))
        if results.get('default_rules'):
            parameters.default_security_rules = []
            for rule in results.get('default_rules'):
                parameters.default_security_rules.append(create_rule_instance(rule))
        parameters.tags = results.get('tags')
        parameters.location = results.get('location')

        try:
            poller = self.network_client.network_security_groups.create_or_update(self.resource_group,
                                                                                  self.name,
                                                                                  parameters)
            result = self.get_poller_result(poller)
        except AzureHttpError as exc:
            self.fail("Error creating/updating security group {0} - {1}".format(self.name, str(exc)))
        return create_network_security_group_dict(result)

    def delete(self):
        """Delete the security group, waiting for the operation to finish."""
        try:
            poller = self.network_client.network_security_groups.delete(self.resource_group, self.name)
            result = self.get_poller_result(poller)
        except AzureHttpError as exc:
            raise Exception("Error deleting security group {0} - {1}".format(self.name, str(exc)))
        return result
def main():
    # Instantiating the module class drives the whole Ansible module
    # lifecycle: argument parsing, Azure auth, exec_module() and exit.
    AzureRMSecurityGroup()
if __name__ == '__main__':
    main()
| gpl-3.0 |
pyfisch/servo | components/script/dom/bindings/codegen/parser/tests/test_exposed_extended_attribute.py | 127 | 6466 | import WebIDL
def WebIDLTest(parser, harness):
    """Exercise the [Exposed] extended attribute.

    Covers: valid exposure sets on interfaces and members (including partial
    interfaces and implemented mixins), default exposure, and the error cases
    where [Exposed] names an unknown global or widens a member's or mixin's
    exposure beyond its interface.

    Note: the original used Python-2-only ``except Exception, x`` syntax; the
    bound name was unused, so plain ``except Exception:`` (valid on both
    Python 2 and 3) is used throughout.
    """
    parser.parse("""
        [PrimaryGlobal] interface Foo {};
        [Global=(Bar1,Bar2)] interface Bar {};
        [Global=Baz2] interface Baz {};
        [Exposed=(Foo,Bar1)]
        interface Iface {
          void method1();
          [Exposed=Bar1]
          readonly attribute any attr;
        };
        [Exposed=Foo]
        partial interface Iface {
          void method2();
        };
    """)
    results = parser.finish()

    harness.check(len(results), 5, "Should know about five things");
    iface = results[3]
    harness.ok(isinstance(iface, WebIDL.IDLInterface),
               "Should have an interface here");
    members = iface.members
    harness.check(len(members), 3, "Should have three members")

    harness.ok(members[0].exposureSet == set(["Foo", "Bar"]),
               "method1 should have the right exposure set")
    harness.ok(members[0]._exposureGlobalNames == set(["Foo", "Bar1"]),
               "method1 should have the right exposure global names")

    harness.ok(members[1].exposureSet == set(["Bar"]),
               "attr should have the right exposure set")
    harness.ok(members[1]._exposureGlobalNames == set(["Bar1"]),
               "attr should have the right exposure global names")

    harness.ok(members[2].exposureSet == set(["Foo"]),
               "method2 should have the right exposure set")
    harness.ok(members[2]._exposureGlobalNames == set(["Foo"]),
               "method2 should have the right exposure global names")

    harness.ok(iface.exposureSet == set(["Foo", "Bar"]),
               "Iface should have the right exposure set")
    harness.ok(iface._exposureGlobalNames == set(["Foo", "Bar1"]),
               "Iface should have the right exposure global names")

    parser = parser.reset()
    parser.parse("""
        [PrimaryGlobal] interface Foo {};
        [Global=(Bar1,Bar2)] interface Bar {};
        [Global=Baz2] interface Baz {};
        interface Iface2 {
          void method3();
        };
    """)
    results = parser.finish()

    harness.check(len(results), 4, "Should know about four things");
    iface = results[3]
    harness.ok(isinstance(iface, WebIDL.IDLInterface),
               "Should have an interface here");
    members = iface.members
    harness.check(len(members), 1, "Should have one member")

    harness.ok(members[0].exposureSet == set(["Foo"]),
               "method3 should have the right exposure set")
    harness.ok(members[0]._exposureGlobalNames == set(["Foo"]),
               "method3 should have the right exposure global names")

    harness.ok(iface.exposureSet == set(["Foo"]),
               "Iface2 should have the right exposure set")
    harness.ok(iface._exposureGlobalNames == set(["Foo"]),
               "Iface2 should have the right exposure global names")

    parser = parser.reset()
    parser.parse("""
        [PrimaryGlobal] interface Foo {};
        [Global=(Bar1,Bar2)] interface Bar {};
        [Global=Baz2] interface Baz {};
        [Exposed=Foo]
        interface Iface3 {
          void method4();
        };
        [Exposed=(Foo,Bar1)]
        interface Mixin {
          void method5();
        };
        Iface3 implements Mixin;
    """)
    results = parser.finish()
    harness.check(len(results), 6, "Should know about six things");
    iface = results[3]
    harness.ok(isinstance(iface, WebIDL.IDLInterface),
               "Should have an interface here");
    members = iface.members
    harness.check(len(members), 2, "Should have two members")

    harness.ok(members[0].exposureSet == set(["Foo"]),
               "method4 should have the right exposure set")
    harness.ok(members[0]._exposureGlobalNames == set(["Foo"]),
               "method4 should have the right exposure global names")

    harness.ok(members[1].exposureSet == set(["Foo", "Bar"]),
               "method5 should have the right exposure set")
    harness.ok(members[1]._exposureGlobalNames == set(["Foo", "Bar1"]),
               "method5 should have the right exposure global names")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            [Exposed=Foo]
            interface Bar {
            };
        """)
        results = parser.finish()
    except Exception:
        threw = True

    harness.ok(threw, "Should have thrown on invalid Exposed value on interface.")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            interface Bar {
              [Exposed=Foo]
              readonly attribute bool attr;
            };
        """)
        results = parser.finish()
    except Exception:
        threw = True

    harness.ok(threw, "Should have thrown on invalid Exposed value on attribute.")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            interface Bar {
              [Exposed=Foo]
              void operation();
            };
        """)
        results = parser.finish()
    except Exception:
        threw = True

    harness.ok(threw, "Should have thrown on invalid Exposed value on operation.")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            interface Bar {
              [Exposed=Foo]
              const long constant = 5;
            };
        """)
        results = parser.finish()
    except Exception:
        threw = True

    harness.ok(threw, "Should have thrown on invalid Exposed value on constant.")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            [Global] interface Foo {};
            [Global] interface Bar {};
            [Exposed=Foo]
            interface Baz {
              [Exposed=Bar]
              void method();
            };
        """)
        results = parser.finish()
    except Exception:
        threw = True

    harness.ok(threw, "Should have thrown on member exposed where its interface is not.")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            [Global] interface Foo {};
            [Global] interface Bar {};
            [Exposed=Foo]
            interface Baz {
              void method();
            };
            [Exposed=Bar]
            interface Mixin {};
            Baz implements Mixin;
        """)
        results = parser.finish()
    except Exception:
        threw = True

    harness.ok(threw, "Should have thrown on LHS of implements being exposed where RHS is not.")
| mpl-2.0 |
Xeralux/tensorflow | tensorflow/python/keras/preprocessing/text/__init__.py | 4 | 1227 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras data preprocessing utils for text data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Re-export the public text-preprocessing helpers from the internal
# Keras implementation package.
from tensorflow.python.keras._impl.keras.preprocessing.text import hashing_trick
from tensorflow.python.keras._impl.keras.preprocessing.text import one_hot
from tensorflow.python.keras._impl.keras.preprocessing.text import text_to_word_sequence
from tensorflow.python.keras._impl.keras.preprocessing.text import Tokenizer
# Remove the __future__ aliases so they do not leak into this module's
# public namespace (e.g. via `from ... import *`).
del absolute_import
del division
del print_function
| apache-2.0 |
nacc/autotest | client/tests/kvm/tests/pci_hotplug.py | 1 | 8582 | import re
from autotest.client.shared import error
from autotest.client.virt import virt_utils, virt_vm, aexpect
def run_pci_hotplug(test, params, env):
    """
    Test hotplug of PCI devices.

    (Elements between [] are configurable test parameters)
    1) PCI add a device (NIC / block)
    2) Compare output of monitor command 'info pci'.
    3) Compare output of guest command [reference_cmd].
    4) Verify whether pci_model is shown in [pci_find_cmd].
    5) Check whether the newly added PCI device works fine.
    6) PCI delete the device, verify whether could remove the PCI device.

    @param test: KVM test object.
    @param params: Dictionary with the test parameters.
    @param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    # Modprobe the module if specified in config file
    module = params.get("modprobe_module")
    if module:
        session.cmd("modprobe %s" % module)

    # Get output of command 'info pci' as reference
    info_pci_ref = vm.monitor.info("pci")

    # Get output of command as reference
    reference = session.cmd_output(params.get("reference_cmd"))

    tested_model = params.get("pci_model")
    test_type = params.get("pci_type")
    image_format = params.get("image_format_stg")

    # Probe qemu to verify what is the supported syntax for PCI hotplug
    cmd_output = vm.monitor.cmd("?")
    if len(re.findall("\ndevice_add", cmd_output)) > 0:
        cmd_type = "device_add"
    elif len(re.findall("\npci_add", cmd_output)) > 0:
        cmd_type = "pci_add"
    else:
        raise error.TestError("Unknown version of qemu")

    # Determine syntax of drive hotplug
    # __com.redhat_drive_add == qemu-kvm-0.12 on RHEL 6
    if len(re.findall("\n__com.redhat_drive_add", cmd_output)) > 0:
        drive_cmd_type = "__com.redhat_drive_add"
    # drive_add == qemu-kvm-0.13 onwards
    elif len(re.findall("\ndrive_add", cmd_output)) > 0:
        drive_cmd_type = "drive_add"
    else:
        raise error.TestError("Unknown version of qemu")

    # Probe qemu for a list of supported devices
    devices_support = vm.monitor.cmd("%s ?" % cmd_type)

    if cmd_type == "pci_add":
        if test_type == "nic":
            pci_add_cmd = "pci_add pci_addr=auto nic model=%s" % tested_model
        elif test_type == "block":
            image_params = params.object_params("stg")
            image_filename = virt_utils.get_image_filename(image_params,
                                                           test.bindir)
            pci_add_cmd = ("pci_add pci_addr=auto storage file=%s,if=%s" %
                           (image_filename, tested_model))
        # Execute pci_add (should be replaced by a proper monitor method call)
        add_output = vm.monitor.cmd(pci_add_cmd)
        if not "OK domain" in add_output:
            raise error.TestFail("Add PCI device failed. "
                                 "Monitor command is: %s, Output: %r" %
                                 (pci_add_cmd, add_output))
        after_add = vm.monitor.info("pci")

    elif cmd_type == "device_add":
        driver_id = test_type + "-" + virt_utils.generate_random_id()
        device_id = test_type + "-" + virt_utils.generate_random_id()
        if test_type == "nic":
            if tested_model == "virtio":
                tested_model = "virtio-net-pci"
            pci_add_cmd = "device_add id=%s,driver=%s" % (device_id,
                                                          tested_model)

        elif test_type == "block":
            image_params = params.object_params("stg")
            image_filename = virt_utils.get_image_filename(image_params,
                                                           test.bindir)
            controller_model = None
            if tested_model == "virtio":
                tested_model = "virtio-blk-pci"

            if tested_model == "scsi":
                tested_model = "scsi-disk"
                controller_model = "lsi53c895a"
                if len(re.findall(controller_model, devices_support)) == 0:
                    raise error.TestError("scsi controller device (%s) not "
                                          "supported by qemu" %
                                          controller_model)

            if controller_model is not None:
                controller_id = "controller-" + device_id
                controller_add_cmd = ("device_add %s,id=%s" %
                                      (controller_model, controller_id))
                vm.monitor.cmd(controller_add_cmd)

            if drive_cmd_type == "drive_add":
                driver_add_cmd = ("drive_add auto "
                                  "file=%s,if=none,id=%s,format=%s" %
                                  (image_filename, driver_id, image_format))
            elif drive_cmd_type == "__com.redhat_drive_add":
                driver_add_cmd = ("__com.redhat_drive_add "
                                  "file=%s,format=%s,id=%s" %
                                  (image_filename, image_format, driver_id))

            pci_add_cmd = ("device_add id=%s,driver=%s,drive=%s" %
                           (device_id, tested_model, driver_id))
            vm.monitor.cmd(driver_add_cmd)

        # Check if the device is support in qemu
        if len(re.findall(tested_model, devices_support)) > 0:
            add_output = vm.monitor.cmd(pci_add_cmd)
        else:
            raise error.TestError("%s doesn't support device: %s" %
                                  (cmd_type, tested_model))
        after_add = vm.monitor.info("pci")
        if not device_id in after_add:
            raise error.TestFail("Add device failed. Monitor command is: %s"
                                 ". Output: %r" % (pci_add_cmd, add_output))

    # Define a helper function to delete the device
    def pci_del(ignore_failure=False):
        if cmd_type == "pci_add":
            result_domain, bus, slot, function = add_output.split(',')
            domain = int(result_domain.split()[2])
            bus = int(bus.split()[1])
            slot = int(slot.split()[1])
            pci_addr = "%x:%x:%x" % (domain, bus, slot)
            cmd = "pci_del pci_addr=%s" % pci_addr
        elif cmd_type == "device_add":
            cmd = "device_del %s" % device_id
        # This should be replaced by a proper monitor method call
        vm.monitor.cmd(cmd)

        def device_removed():
            after_del = vm.monitor.info("pci")
            return after_del != after_add

        if (not virt_utils.wait_for(device_removed, 10, 0, 1)
            and not ignore_failure):
            raise error.TestFail("Failed to hot remove PCI device: %s. "
                                 "Monitor command: %s" %
                                 (tested_model, cmd))

    try:
        # Compare the output of 'info pci'
        if after_add == info_pci_ref:
            raise error.TestFail("No new PCI device shown after executing "
                                 "monitor command: 'info pci'")

        # Define a helper function to compare the output
        def new_shown():
            o = session.cmd_output(params.get("reference_cmd"))
            return o != reference

        secs = int(params.get("wait_secs_for_hook_up"))
        if not virt_utils.wait_for(new_shown, 30, secs, 3):
            raise error.TestFail("No new device shown in output of command "
                                 "executed inside the guest: %s" %
                                 params.get("reference_cmd"))

        # Define a helper function to catch PCI device string
        def find_pci():
            o = session.cmd_output(params.get("find_pci_cmd"))
            return params.get("match_string") in o

        if not virt_utils.wait_for(find_pci, 30, 3, 3):
            raise error.TestFail("PCI %s %s device not found in guest. "
                                 "Command was: %s" %
                                 (tested_model, test_type,
                                  params.get("find_pci_cmd")))

        # Test the newly added device
        try:
            session.cmd(params.get("pci_test_cmd"))
        except aexpect.ShellError as e:
            raise error.TestFail("Check for %s device failed after PCI "
                                 "hotplug. Output: %r" % (test_type, e.output))

        session.close()

    except Exception:
        # Best-effort cleanup: try to unplug even when the test body failed.
        pci_del(ignore_failure=True)
        raise

    else:
        pci_del()
| gpl-2.0 |
megarcia/WxCD | source/process_NCEI_04b.py | 2 | 3913 | """
Python script 'process_NCEI_04b.py'
by Matthew Garcia, PhD student
Dept. of Forest and Wildlife Ecology
University of Wisconsin - Madison
matt.e.garcia@gmail.com
Copyright (C) 2015-2016 by Matthew Garcia
Licensed Gnu GPL v3; see 'LICENSE_GnuGPLv3.txt' for complete terms
Send questions, bug reports, any related requests to matt.e.garcia@gmail.com
See also 'README.md', 'DISCLAIMER.txt', 'CITATION.txt', 'ACKNOWLEDGEMENTS.txt'
Treat others as you would be treated. Pay it forward. Valar dohaeris.
PURPOSE: Calculating statistics (mean/stdev/trend/p-value over analysis period)
for aggregated climatological grids. Numerous variables are addressed.
DEPENDENCIES: h5py, numpy, scipy.stats
USAGE: '$ python process_NCEI_04b.py 1984 2013 ./analyses'
INPUT: '.h5' output file from process_NCEI_04a.py
(with the naming convention
'analyses/[YYYY]-[YYYY]_derived_clim_grids.h5')
OUTPUT: Same '.h5' file with new calculated statistics datacubes
"""
import sys
import datetime
import h5py as hdf
import numpy as np
from scipy.stats import pearsonr
def message(char_string):
    """Print *char_string* and flush stdout so output appears immediately
    even when the script's output is piped or redirected."""
    print(char_string)
    sys.stdout.flush()
    return
def regress(ny, yvals):
    """Least-squares linear fit of *yvals* against the index 0..ny-1.

    Returns a (slope, pearson_r, p_value) tuple.
    """
    xs = np.arange(ny)
    slope, _intercept = np.polyfit(xs, yvals, 1)
    corr, sig = pearsonr(xs, yvals)
    return slope, corr, sig
def write_to_file(h5file, gvar, gdata):
    """Write *gdata* to dataset *gvar* in the open HDF5 file, replacing any
    existing dataset of the same name, and log what was saved."""
    existing = h5file.keys()
    if gvar in existing:
        # h5py cannot overwrite in place; delete first.
        del h5file[gvar]
    h5file.create_dataset(gvar, data=gdata, dtype=np.float32,
                          compression='gzip')
    message('- saved %s %s' % (gvar, str(gdata.shape)))
    return
message(' ')
message('process_NCEI_04b.py started at %s' %
        datetime.datetime.now().isoformat())
message(' ')
#
# Optional 3rd argument: directory holding the aggregated grids file.
if len(sys.argv) < 4:
    message('input warning: no input directory indicated, using ./analyses')
    path = './analyses'
else:
    path = sys.argv[3]
#
# Optional 1st/2nd arguments: begin and end years of the analysis period.
if len(sys.argv) < 3:
    message('no dates specified, analyzing 1984-2013 period')
    year_begin = 1984
    year_end = 2013
else:
    year_begin = int(sys.argv[1])
    year_end = int(sys.argv[2])
#
h5fname = '%s/%d-%d_derived_clim_grids.h5' % (path, year_begin, year_end)
message('extracting variable information from %s' % h5fname)
with hdf.File(h5fname, 'r') as h5infile:
    varnames = h5infile.keys()
    # Climatological datacubes are stored under a 'grids_' name prefix.
    gridvarnames = [var for var in varnames if var[:6] == 'grids_']
ngridvars = len(gridvarnames)
message('- found %d collections of climatological grids' % ngridvars)
message(' ')
#
message('calculating statistics grids and writing to %s' % h5fname)
with hdf.File(h5fname, 'r+') as h5outfile:
    if 'stats_order' not in h5outfile['meta'].keys():
        h5outfile.create_dataset('meta/stats_order',
                                 data='mean, stdev, min, max, trend, \
                                 pearson r, p value')
        message('- 1 metadata item saved')
#
# The unused enumerate() index was removed here: its 'i' was shadowed by
# the column loop below, which is fragile.  Loop variables are named
# row/col to keep the two loops distinct.
for gridvarname in gridvarnames:
    statsvarname = 'stats_%s' % gridvarname[6:]
    with hdf.File(h5fname, 'r') as h5infile:
        gridvar = np.copy(h5infile[gridvarname])
        message('- read %s %s' % (gridvarname, str(np.shape(gridvar))))
    nyears, nrows, ncols = np.shape(gridvar)
    # Plane order matches the 'stats_order' metadata written above.
    statscube = np.zeros((7, nrows, ncols))
    statscube[0, :, :] = np.mean(gridvar, axis=0)
    statscube[1, :, :] = np.std(gridvar, axis=0)
    statscube[2, :, :] = np.min(gridvar, axis=0)
    statscube[3, :, :] = np.max(gridvar, axis=0)
    # Per-cell linear trend, correlation and significance over the year axis.
    for row in range(nrows):
        for col in range(ncols):
            statscube[4, row, col], statscube[5, row, col], \
                statscube[6, row, col] = regress(nyears, gridvar[:, row, col])
    with hdf.File(h5fname, 'r+') as h5outfile:
        write_to_file(h5outfile, statsvarname, statscube)
    message(' ')
#
message('process_NCEI_04b.py completed at %s' %
        datetime.datetime.now().isoformat())
message(' ')
sys.exit(0)

# end process_NCEI_04b.py
| gpl-3.0 |
manassolanki/erpnext | erpnext/buying/doctype/supplier_scorecard_variable/test_supplier_scorecard_variable.py | 19 | 1592 | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from erpnext.buying.doctype.supplier_scorecard_variable.supplier_scorecard_variable import VariablePathNotFound
class TestSupplierScorecardVariable(unittest.TestCase):
	"""Tests for the Supplier Scorecard Variable doctype fixtures."""

	def test_variable_exist(self):
		# Every variable shipped with the app must already exist with the
		# expected field values.
		for expected in test_existing_variables:
			doc = frappe.get_doc("Supplier Scorecard Variable", expected.get("name"))
			self.assertEqual(doc.param_name, expected.get('param_name'))
			self.assertEqual(doc.variable_label, expected.get('variable_label'))
			self.assertEqual(doc.path, expected.get('path'))

	def test_path_exists(self):
		# Variables whose 'path' resolves to a real scorecard method insert
		# cleanly (re-inserting after deleting any leftover copy).
		for spec in test_good_variables:
			if frappe.db.exists(spec):
				frappe.delete_doc(spec.get("doctype"), spec.get("name"))
			frappe.get_doc(spec).insert()
		# Variables with a bogus 'path' must raise VariablePathNotFound.
		for spec in test_bad_variables:
			self.assertRaises(VariablePathNotFound, frappe.get_doc(spec).insert)
# Fixture: variables shipped with ERPNext that are expected to already exist.
test_existing_variables = [
	{
		"param_name":"total_accepted_items",
		"name":"Total Accepted Items",
		"doctype":"Supplier Scorecard Variable",
		"variable_label":"Total Accepted Items",
		"path":"get_total_accepted_items"
	},
]
# Fixture: variables whose 'path' resolves to a real scorecard method;
# inserting these should succeed.
test_good_variables = [
	{
		"param_name":"good_variable1",
		"name":"Good Variable 1",
		"doctype":"Supplier Scorecard Variable",
		"variable_label":"Good Variable 1",
		"path":"get_total_accepted_items"
	},
]
# Fixture: variables whose 'path' does not exist; inserting these should
# raise VariablePathNotFound.
test_bad_variables = [
	{
		"param_name":"fake_variable1",
		"name":"Fake Variable 1",
		"doctype":"Supplier Scorecard Variable",
		"variable_label":"Fake Variable 1",
		"path":"get_fake_variable1"
	},
] | gpl-3.0 |
cshtarkov/autobump | tests/test_java_handler.py | 1 | 10110 | import os
import tempfile
import unittest
from autobump.handlers import java_ast
from autobump.handlers import java_native
class TestJavaHandlerBase(unittest.TestCase):
    """Used to set up a simple Java codebase in a temporary
    location.
    Does not contain any actual tests. Test cases
    are meant to inherit from this class so that they
    all have a non-trivial fixture."""
    @classmethod
    def setUpClass(cls):
        # (path, source) pairs for a small multi-package Java tree that
        # exercises classes, interfaces, inheritance, overloading and
        # inner classes.
        sources = [
            ("packageX/ClassA.java",
            """
            package packageX;
            public class ClassA {
                public void returnsVoid() {}
                public ClassB returnsB(ClassB b) { return b; }
                private void privateReturnsVoid() {}
            }
            """),
            ("packageX/ClassB.java",
            """
            package packageX;
            public class ClassB {
                private static final int ONE = 1;
                public static final int FIVE = 5;
                public ClassA returnsB(ClassA b) { return b; }
            }
            """),
            ("packageY/ClassC.java",
            """
            package packageY;
            import packageX.ClassA;
            import packageX.ClassB;
            public class ClassC extends ClassA {
                public void overloaded(int a) {}
                public void overloaded(int a, ClassA b) {}
                public void overloaded(int a, ClassB b) {}
                public class Inner {
                }
            }
            """),
            ("packageY/InterfaceD.java",
            """
            package packageY;
            public interface InterfaceD {
                public static final boolean INTERFACE_D = true;
            }
            """),
            ("packageY/InterfaceE.java",
            """
            package packageY;
            public interface InterfaceE {
            }
            """),
            ("packageY/InterfaceX.java",
            """
            package packageY;
            public interface InterfaceX extends InterfaceD, InterfaceE {
            }
            """),
            ("packageY/ClassD.java",
            """
            package packageY;
            import packageX.ClassA;
            public class ClassD extends ClassC implements InterfaceD {
                public static void acceptsClassD(ClassD p) {}
                public static void acceptsIfaceD(InterfaceD p) {}
                public static void acceptsIfaceE(InterfaceE p) {}
                public static void acceptsIfaceX(InterfaceX p) {}
                public static void acceptsClassA(ClassA p) {}
                public static void acceptsClassC(ClassC p) {}
                public static void acceptsArrayClassC(ClassC[] p) {}
                public static void acceptsArrayClassA(ClassA[] p) {}
            }
            """)
        ]
        # Keep a handle to the TemporaryDirectory so it lives until
        # tearDownClass; cls.dir is its path.
        cls.dir_handle = tempfile.TemporaryDirectory()
        cls.dir = cls.dir_handle.name
        # Write the above class definitions to source files.
        files = []
        for filename, source in sources:
            fullpath = os.path.join(cls.dir, filename)
            files.append(fullpath)
            os.makedirs(os.path.dirname(fullpath), exist_ok=True)
            with open(fullpath, "w") as f:
                f.write(source)
        # Get two codebases for the two different handlers.
        cls.codebase_ast = java_ast.codebase_to_units(cls.dir)
        cls.codebase_native = java_native.codebase_to_units(cls.dir, 'javac `find -name "*.java" | xargs`', '.')
        # By default, run the java_ast handler tests.
        # The java_native handler will need to override setUp()
        # and reassign cls.codebase.
        cls.codebase = cls.codebase_ast
    @classmethod
    def tearDownClass(cls):
        # Remove the temporary Java source tree.
        cls.dir_handle.cleanup()
    def setUp(self):
        # Re-bind per-test so subclasses can swap in the native codebase.
        self.codebase = self.__class__.codebase
        self.codebase_ast = self.__class__.codebase_ast
class TestClassesAST(TestJavaHandlerBase):
    """Checks that classes, members, fields and nested classes in the
    fixture are picked up by the AST handler."""

    def test_class_names(self):
        # All top-level units of the fixture must be present.
        for unit in ("packageX.ClassA", "packageX.ClassB", "packageY.ClassC",
                     "packageY.InterfaceD", "packageY.ClassD"):
            self.assertTrue(unit in self.codebase)

    def test_class_functions(self):
        functions = self.codebase["packageX.ClassA"].functions
        # Public methods are visible, private ones are not.
        self.assertTrue("returnsVoid" in functions)
        self.assertFalse("privateReturnsVoid" in functions)

    def test_class_fields(self):
        fields = self.codebase["packageX.ClassB"].fields
        # Public fields are visible, private ones are not.
        self.assertTrue("FIVE" in fields)
        self.assertFalse("ONE" in fields)

    def test_inner_class(self):
        self.assertTrue("Inner" in self.codebase["packageY.ClassC"].units)
class TestClassesNative(TestClassesAST):
    # Re-runs the inherited class tests against the codebase produced by
    # the native (compiled-bytecode) handler.
    def setUp(self):
        super(TestClassesNative, self).setUp()
        self.codebase = self.codebase_native

    # java_ast and java_native disagree on what inner classes should be called,
    # so we need to override this test.
    def test_inner_class(self):
        # The native handler reports the JVM binary name 'Outer$Inner'.
        self.assertTrue("packageY.ClassC$Inner" in self.codebase["packageY.ClassC"].units)
class TestMethodOverloadingAST(TestJavaHandlerBase):
    """Checks that ClassC's three 'overloaded' methods are recorded as
    separate signatures by the AST handler."""

    def test_overloading_possible(self):
        overloaded = self.codebase["packageY.ClassC"].functions["overloaded"]
        self.assertEqual(len(overloaded.signatures), 3)

    def test_additional_parameter(self):
        function = self.codebase["packageY.ClassC"].functions["overloaded"]
        # One overload takes (implicit this, int, ClassA).
        matches = [sig for sig in function.signatures
                   if len(sig.parameters) == 3
                   and sig.parameters[2].type.name == "packageX.ClassA"]
        self.assertTrue(len(matches) > 0)

    def test_parameter_different_type(self):
        function = self.codebase["packageY.ClassC"].functions["overloaded"]
        # Another overload takes (implicit this, int, ClassB).
        matches = [sig for sig in function.signatures
                   if len(sig.parameters) == 3
                   and sig.parameters[2].type.name == "packageX.ClassB"]
        self.assertTrue(len(matches) > 0)
class TestMethodOverloadingNative(TestMethodOverloadingAST):
    # Re-runs the inherited overloading tests against the native handler's
    # codebase; no behavioral overrides are needed.
    def setUp(self):
        super(TestMethodOverloadingNative, self).setUp()
        self.codebase = self.codebase_native
class TestTypesAST(TestJavaHandlerBase):
    """Exercises Type.is_compatible across the ClassA..ClassD / IfaceD..IfaceX
    fixture hierarchy.

    Each test pulls a type out of packageY.ClassD's acceptsXxx methods;
    every lookup reads the second parameter of the method's first signature.
    NOTE(review): parameters[1] is used throughout -- presumably
    parameters[0] is an implicit receiver; confirm against the handler.
    """
    def test_type_and_array_of_type_are_different(self):
        t = self.codebase["packageY.ClassD"].functions["acceptsClassA"].signatures[0].parameters[1].type
        arrayT = self.codebase["packageY.ClassD"].functions["acceptsArrayClassA"].signatures[0].parameters[1].type
        # Neither direction: T vs T[] are unrelated.
        self.assertFalse(t.is_compatible(arrayT))
        self.assertFalse(arrayT.is_compatible(t))
    def test_superclass_compatible_with_subclass(self):
        superclass = self.codebase["packageY.ClassD"].functions["acceptsClassA"].signatures[0].parameters[1].type
        subclass = self.codebase["packageY.ClassD"].functions["acceptsClassC"].signatures[0].parameters[1].type
        self.assertTrue(superclass.is_compatible(subclass))
    def test_superclass_array_compatible_with_subclass(self):
        superclass = self.codebase["packageY.ClassD"].functions["acceptsArrayClassA"].signatures[0].parameters[1].type
        subclass = self.codebase["packageY.ClassD"].functions["acceptsArrayClassC"].signatures[0].parameters[1].type
        self.assertTrue(superclass.is_compatible(subclass))
    def test_subclass_not_compatible_with_superclass(self):
        superclass = self.codebase["packageY.ClassD"].functions["acceptsClassA"].signatures[0].parameters[1].type
        subclass = self.codebase["packageY.ClassD"].functions["acceptsClassC"].signatures[0].parameters[1].type
        # Compatibility is one-way: a subclass does not accept its superclass.
        self.assertFalse(subclass.is_compatible(superclass))
    def test_subclass_array_not_compatible_with_superclass(self):
        superclass = self.codebase["packageY.ClassD"].functions["acceptsArrayClassA"].signatures[0].parameters[1].type
        subclass = self.codebase["packageY.ClassD"].functions["acceptsArrayClassC"].signatures[0].parameters[1].type
        self.assertFalse(subclass.is_compatible(superclass))
    def test_superclass_compatible_with_subclass_skip_one(self):
        """Compatibility holds across more than one inheritance level."""
        superclass = self.codebase["packageY.ClassD"].functions["acceptsClassA"].signatures[0].parameters[1].type
        subclass = self.codebase["packageY.ClassD"].functions["acceptsClassD"].signatures[0].parameters[1].type
        self.assertTrue(superclass.is_compatible(subclass))
    def test_interface_compatible_with_class(self):
        interface = self.codebase["packageY.ClassD"].functions["acceptsIfaceD"].signatures[0].parameters[1].type
        subclass = self.codebase["packageY.ClassD"].functions["acceptsClassD"].signatures[0].parameters[1].type
        self.assertTrue(interface.is_compatible(subclass))
    def test_class_not_compatible_with_interface(self):
        interface = self.codebase["packageY.ClassD"].functions["acceptsIfaceD"].signatures[0].parameters[1].type
        subclass = self.codebase["packageY.ClassD"].functions["acceptsClassD"].signatures[0].parameters[1].type
        self.assertFalse(subclass.is_compatible(interface))
    def test_interface_extension(self):
        """IfaceX satisfies both interfaces it (transitively) extends."""
        interface1 = self.codebase["packageY.ClassD"].functions["acceptsIfaceD"].signatures[0].parameters[1].type
        interface2 = self.codebase["packageY.ClassD"].functions["acceptsIfaceE"].signatures[0].parameters[1].type
        subclass = self.codebase["packageY.ClassD"].functions["acceptsIfaceX"].signatures[0].parameters[1].type
        self.assertTrue(interface1.is_compatible(subclass))
        self.assertTrue(interface2.is_compatible(subclass))
class TestTypesNative(TestTypesAST):
    """Run the type-compatibility tests against the native codebase."""

    def setUp(self):
        super(TestTypesNative, self).setUp()
        self.codebase = self.codebase_native
        # Point every parameter type at self.dir -- that is where
        # TypeCompatibilityChecker will look for the class files.
        functions = self.codebase["packageY.ClassD"].functions
        for func in functions.values():
            for sig in func.signatures:
                for param in sig.parameters:
                    param.type.location = self.dir
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
sexroute/commandergenius | project/jni/python/src/Demo/tkinter/matt/animation-w-velocity-ctrl.py | 47 | 1201 | from Tkinter import *
# this is the same as simple-demo-1.py, but uses
# subclassing.
# note that there is no explicit call to start Tk.
# Tkinter is smart enough to start the system if it's not already going.
class Test(Frame):
    """Tkinter demo: a small blue square drifts across a canvas.

    A horizontal Scale sets the per-tick displacement; values from the
    scale run -100..100, so negative values move the square the other way.
    """
    def printit(self):
        print "hi"
    def createWidgets(self):
        # Build the UI: quit button, 5-inch canvas, and speed slider.
        self.QUIT = Button(self, text='QUIT', foreground='red',
                           command=self.quit)
        self.QUIT.pack(side=BOTTOM, fill=BOTH)
        self.draw = Canvas(self, width="5i", height="5i")
        self.speed = Scale(self, orient=HORIZONTAL, from_=-100, to=100)
        self.speed.pack(side=BOTTOM, fill=X)
        # all of these work..
        self.draw.create_rectangle(0, 0, 10, 10, tags="thing", fill="blue")
        self.draw.pack(side=LEFT)
    def moveThing(self, *args):
        velocity = self.speed.get()
        # NOTE(review): 'str' shadows the builtin -- rename if this code is
        # ever touched again.
        str = float(velocity) / 1000.0
        # Format the displacement with an "i" suffix -- presumably inches in
        # Tk canvas coordinates, matching the "5i" canvas size above; confirm.
        str = "%ri" % (str,)
        self.draw.move("thing", str, str)
        # Re-schedule ourselves every 10 ms to keep the square moving.
        self.after(10, self.moveThing)
    def __init__(self, master=None):
        Frame.__init__(self, master)
        Pack.config(self)
        self.createWidgets()
        # Kick off the animation loop.
        self.after(10, self.moveThing)
test = Test()
test.mainloop()
| lgpl-2.1 |
hifly/OpenUpgrade | addons/website_mail/__openerp__.py | 379 | 1623 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Website Mail',
    # 'Hidden' keeps this technical glue module out of the apps list.
    'category': 'Hidden',
    'summary': 'Website Module for Mail',
    'version': '0.1',
    'description': """Glue module holding mail improvements for website.""",
    'author': 'OpenERP SA',
    # Bridges the website builder with the mail / email_template stack.
    'depends': ['website', 'mail', 'email_template'],
    # Views, snippets and security rules loaded on installation.
    'data': [
        'views/snippets.xml',
        'views/website_mail.xml',
        'views/website_email_designer.xml',
        'views/email_template_view.xml',
        'data/mail_groups.xml',
        'security/website_mail.xml',
    ],
    'qweb': [
        'static/src/xml/website_mail.xml'
    ],
    'installable': True,
    # Installed automatically as soon as all modules in 'depends' are present.
    'auto_install': True,
}
| agpl-3.0 |
stylianos-kampakis/scikit-learn | sklearn/manifold/tests/test_spectral_embedding.py | 216 | 8091 | from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
from sklearn.manifold.spectral_embedding_ import SpectralEmbedding
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
# Non-centered, sparse cluster centers shared by all tests below; S and
# true_labels form the common blob dataset the embeddings are checked on.
centers = np.array([
    [0.0, 5.0, 0.0, 0.0, 0.0],
    [0.0, 0.0, 4.0, 0.0, 0.0],
    [1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
                            cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
def test_spectral_embedding_two_components(seed=36):
    """A 1-D embedding of a two-blob affinity graph separates the blobs."""
    # Test spectral embedding with two components
    random_state = np.random.RandomState(seed)
    n_sample = 100
    affinity = np.zeros(shape=[n_sample * 2,
                               n_sample * 2])
    # first component
    affinity[0:n_sample,
             0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
    # second component
    affinity[n_sample::,
             n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
    # connection
    affinity[0, n_sample + 1] = 1
    affinity[n_sample + 1, 0] = 1
    # Zero the diagonal, then symmetrize so the matrix is a valid affinity.
    affinity.flat[::2 * n_sample + 1] = 0
    affinity = 0.5 * (affinity + affinity.T)
    true_label = np.zeros(shape=2 * n_sample)
    true_label[0:n_sample] = 1
    se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
                                   random_state=np.random.RandomState(seed))
    embedded_coordinate = se_precomp.fit_transform(affinity)
    # Some numpy versions are touchy with types
    embedded_coordinate = \
        se_precomp.fit_transform(affinity.astype(np.float32))
    # thresholding on the first components using 0.
    label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
    assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36):
    """Precomputed RBF kernel gives the same embedding as affinity='rbf'."""
    # Test spectral embedding with precomputed kernel
    gamma = 1.0
    se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
                                   random_state=np.random.RandomState(seed))
    se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
                               gamma=gamma,
                               random_state=np.random.RandomState(seed))
    embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
    embed_rbf = se_rbf.fit_transform(S)
    assert_array_almost_equal(
        se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
    # Eigenvectors are only defined up to sign, hence the flipping check.
    assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
    """A callable affinity equivalent to 'rbf' yields the same embedding."""
    # Test spectral embedding with callable affinity
    gamma = 0.9
    kern = rbf_kernel(S, gamma=gamma)
    se_callable = SpectralEmbedding(n_components=2,
                                    affinity=(
                                        lambda x: rbf_kernel(x, gamma=gamma)),
                                    gamma=gamma,
                                    random_state=np.random.RandomState(seed))
    se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
                               gamma=gamma,
                               random_state=np.random.RandomState(seed))
    embed_rbf = se_rbf.fit_transform(S)
    embed_callable = se_callable.fit_transform(S)
    assert_array_almost_equal(
        se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
    assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
    # Eigenvectors are only defined up to sign, hence the flipping check.
    assert_true(
        _check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
    """The 'amg' eigensolver agrees with 'arpack' (skipped without pyamg)."""
    # Test spectral embedding with amg solver
    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        raise SkipTest("pyamg not available.")
    se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
                               eigen_solver="amg", n_neighbors=5,
                               random_state=np.random.RandomState(seed))
    se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
                                  eigen_solver="arpack", n_neighbors=5,
                                  random_state=np.random.RandomState(seed))
    embed_amg = se_amg.fit_transform(S)
    embed_arpack = se_arpack.fit_transform(S)
    # Eigenvectors are only defined up to sign, hence the flipping check.
    assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipeline_spectral_clustering(seed=36):
    """KMeans on top of either embedding recovers the true blob labels."""
    # Test using pipeline to do spectral clustering
    random_state = np.random.RandomState(seed)
    se_rbf = SpectralEmbedding(n_components=n_clusters,
                               affinity="rbf",
                               random_state=random_state)
    se_knn = SpectralEmbedding(n_components=n_clusters,
                               affinity="nearest_neighbors",
                               n_neighbors=5,
                               random_state=random_state)
    for se in [se_rbf, se_knn]:
        km = KMeans(n_clusters=n_clusters, random_state=random_state)
        km.fit(se.fit_transform(S))
        # NMI == 1.0 (to 2 decimals) means a perfect label recovery.
        assert_array_almost_equal(
            normalized_mutual_info_score(
                km.labels_,
                true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
    """Fitting with an unrecognized eigen_solver must raise ValueError."""
    embedder = SpectralEmbedding(
        n_components=1,
        affinity="precomputed",
        random_state=np.random.RandomState(seed),
        eigen_solver="<unknown>")
    assert_raises(ValueError, embedder.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
    """Fitting with an unrecognized affinity must raise ValueError."""
    embedder = SpectralEmbedding(
        n_components=1,
        affinity="<unknown>",
        random_state=np.random.RandomState(seed))
    assert_raises(ValueError, embedder.fit, S)
def test_connectivity(seed=36):
    """_graph_is_connected detects (dis)connected graphs, dense or sparse."""
    # Test that graph connectivity test works as expected
    # Node 0 has no edges to the rest of the graph -> disconnected.
    graph = np.array([[1, 0, 0, 0, 0],
                      [0, 1, 1, 0, 0],
                      [0, 1, 1, 1, 0],
                      [0, 0, 1, 1, 1],
                      [0, 0, 0, 1, 1]])
    assert_equal(_graph_is_connected(graph), False)
    assert_equal(_graph_is_connected(csr_matrix(graph)), False)
    assert_equal(_graph_is_connected(csc_matrix(graph)), False)
    # Same graph with the 0-1 edge added -> a single connected chain.
    graph = np.array([[1, 1, 0, 0, 0],
                      [1, 1, 1, 0, 0],
                      [0, 1, 1, 1, 0],
                      [0, 0, 1, 1, 1],
                      [0, 0, 0, 1, 1]])
    assert_equal(_graph_is_connected(graph), True)
    assert_equal(_graph_is_connected(csr_matrix(graph)), True)
    assert_equal(_graph_is_connected(csc_matrix(graph)), True)
def test_spectral_embedding_deterministic():
    """Two embeddings of the same affinity matrix must coincide."""
    rng = np.random.RandomState(36)
    affinity = rbf_kernel(rng.randn(10, 30))
    first = spectral_embedding(affinity)
    second = spectral_embedding(affinity)
    assert_array_almost_equal(first, second)
| bsd-3-clause |
RapidApplicationDevelopment/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/operator_pd_test.py | 9 | 12427 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
# For private members.
from tensorflow.contrib.distributions.python.ops import operator_pd
distributions = tf.contrib.distributions
class OperatorShape(operator_pd.OperatorPDBase):
  """Operator implements the ABC method ._shape.

  Minimal concrete OperatorPDBase used to test that every shape-related
  method on the base class derives correctly from ._shape alone.
  """
  def __init__(self, shape):
    self._stored_shape = shape
  @property
  def verify_pd(self):
    return True
  def get_shape(self):
    # Static shape, known at graph-construction time.
    return tf.TensorShape(self._stored_shape)
  def _shape(self):
    # Dynamic shape: built from a throwaway tensor of the stored shape.
    return tf.shape(np.random.rand(*self._stored_shape))
  @property
  def name(self):
    return "OperatorShape"
  # NOTE(review): unlike verify_pd/name/inputs, dtype is a plain method,
  # not a @property -- confirm against OperatorPDBase's expectations.
  def dtype(self):
    return tf.int32
  @property
  def inputs(self):
    return []
class OperatorSqrtSolve(OperatorShape):
  """Operator implements .sqrt_solve.

  Wraps a Cholesky factor and solves against it with a lower-triangular
  solve; the inverse quadratic form goes through the sqrt-solve path.
  """
  def __init__(self, chol_array):
    self._chol = tf.convert_to_tensor(chol_array)
    super(OperatorSqrtSolve, self).__init__(chol_array.shape)
  def _sqrt_solve(self, rhs):
    return tf.matrix_triangular_solve(self._chol, rhs, lower=True)
  def _batch_sqrt_solve(self, rhs):
    return tf.matrix_triangular_solve(self._chol, rhs, lower=True)
  def _inv_quadratic_form_on_vectors(self, x):
    # Exercise the base class's sqrt-solve implementation.
    return self._iqfov_via_sqrt_solve(x)
class OperatorSolve(OperatorShape):
  """Operator implements .solve.

  Builds the full positive-definite matrix chol @ chol^T and solves
  against it directly; the inverse quadratic form goes through the
  full-solve path.
  """
  def __init__(self, chol):
    self._pos_def_matrix = tf.matmul(chol, chol, adjoint_b=True)
    super(OperatorSolve, self).__init__(chol.shape)
  def _solve(self, rhs):
    return tf.matrix_solve(self._pos_def_matrix, rhs)
  def _batch_solve(self, rhs):
    return tf.matrix_solve(self._pos_def_matrix, rhs)
  def _inv_quadratic_form_on_vectors(self, x):
    # Exercise the base class's full-solve implementation.
    return self._iqfov_via_solve(x)
class OperatorPDBaseTest(tf.test.TestCase):
  """Tests OperatorPDBase's derived shape methods and the two
  inv_quadratic_form_on_vectors code paths (sqrt-solve and full-solve)
  against a NumPy reference, across several x/operator batch-rank combos.
  """
  def setUp(self):
    self._rng = np.random.RandomState(42)
  def _random_cholesky_array(self, shape):
    """Return a random lower-triangular Cholesky factor of `shape`."""
    mat = self._rng.rand(*shape)
    chol = distributions.matrix_diag_transform(mat, transform=tf.nn.softplus)
    # Zero the upper triangle because we're using this as a true Cholesky factor
    # in our tests.
    return tf.matrix_band_part(chol, -1, 0).eval()
  def _numpy_inv_quadratic_form_on_vectors(self, chol, x):
    """NumPy reference for x^T (chol chol^T)^{-1} x, batched."""
    # Numpy works with batches now (calls them "stacks").
    x_expanded = np.expand_dims(x, -1)
    whitened = np.linalg.solve(chol, x_expanded)
    return (whitened**2).sum(axis=-1).sum(axis=-1)
  def testAllShapesMethodsDefinedByTheOneAbstractpropertyShape(self):
    shape = (1, 2, 3, 3)
    with self.test_session():
      operator = OperatorShape(shape)
      self.assertAllEqual(shape, operator.shape().eval())
      self.assertAllEqual(4, operator.rank().eval())
      self.assertAllEqual((1, 2), operator.batch_shape().eval())
      self.assertAllEqual((1, 2, 3), operator.vector_shape().eval())
      self.assertAllEqual(3, operator.vector_space_dimension().eval())
      self.assertEqual(shape, operator.get_shape())
      self.assertEqual((1, 2), operator.get_batch_shape())
      self.assertEqual((1, 2, 3), operator.get_vector_shape())
  def testIqfovXRankSameAsBroadcastRankUsingSqrtSolve(self):
    with self.test_session():
      for batch_shape in [(), (2,)]:
        for k in [1, 3]:
          x_shape = batch_shape + (k,)
          x = self._rng.randn(*x_shape)
          chol_shape = batch_shape + (k, k)
          chol = self._random_cholesky_array(chol_shape)
          operator = OperatorSqrtSolve(chol)
          qf = operator.inv_quadratic_form_on_vectors(x)
          self.assertEqual(batch_shape, qf.get_shape())
          numpy_qf = self._numpy_inv_quadratic_form_on_vectors(chol, x)
          self.assertAllClose(numpy_qf, qf.eval())
  def testIqfovXRankGreaterThanBroadcastRankUsingSqrtSolve(self):
    with self.test_session():
      for batch_shape in [(), (2,), (2, 3)]:
        for k in [1, 4]:
          x_shape = batch_shape + (k,)
          x = self._rng.randn(*x_shape)
          # chol will not have the leading dimension.
          chol_shape = batch_shape[1:] + (k, k)
          chol = self._random_cholesky_array(chol_shape)
          operator = OperatorSqrtSolve(chol)
          qf = operator.inv_quadratic_form_on_vectors(x)
          numpy_qf = self._numpy_inv_quadratic_form_on_vectors(chol, x)
          self.assertEqual(batch_shape, qf.get_shape())
          self.assertAllClose(numpy_qf, qf.eval())
  def testIqfovXRankTwoGreaterThanBroadcastRankUsingSqrtSolve(self):
    with self.test_session():
      for batch_shape in [(2, 3), (2, 3, 4), (2, 3, 4, 5)]:
        for k in [1, 4]:
          x_shape = batch_shape + (k,)
          x = self._rng.randn(*x_shape)
          # chol will not have the leading two dimensions.
          chol_shape = batch_shape[2:] + (k, k)
          chol = self._random_cholesky_array(chol_shape)
          operator = OperatorSqrtSolve(chol)
          qf = operator.inv_quadratic_form_on_vectors(x)
          numpy_qf = self._numpy_inv_quadratic_form_on_vectors(chol, x)
          self.assertEqual(batch_shape, qf.get_shape())
          self.assertAllClose(numpy_qf, qf.eval())
  def testIqfovXRankSameAsBroadcastRankUsingSolve(self):
    with self.test_session():
      for batch_shape in [(), (2,)]:
        for k in [1, 3]:
          x_shape = batch_shape + (k,)
          x = self._rng.randn(*x_shape)
          chol_shape = batch_shape + (k, k)
          chol = self._random_cholesky_array(chol_shape)
          operator = OperatorSolve(chol)
          qf = operator.inv_quadratic_form_on_vectors(x)
          self.assertEqual(batch_shape, qf.get_shape())
          numpy_qf = self._numpy_inv_quadratic_form_on_vectors(chol, x)
          self.assertAllClose(numpy_qf, qf.eval())
  def testIqfovXRankGreaterThanBroadcastRankUsingSolve(self):
    with self.test_session():
      for batch_shape in [(2,), (2, 3)]:
        for k in [1, 4]:
          x_shape = batch_shape + (k,)
          x = self._rng.randn(*x_shape)
          # chol will not have the leading dimension.
          chol_shape = batch_shape[1:] + (k, k)
          chol = self._random_cholesky_array(chol_shape)
          operator = OperatorSolve(chol)
          qf = operator.inv_quadratic_form_on_vectors(x)
          numpy_qf = self._numpy_inv_quadratic_form_on_vectors(chol, x)
          self.assertEqual(batch_shape, qf.get_shape())
          self.assertAllClose(numpy_qf, qf.eval())
  def testIqfovXRankTwoGreaterThanBroadcastRankUsingSolve(self):
    with self.test_session():
      for batch_shape in [(2, 3), (2, 3, 4), (2, 3, 4, 5)]:
        for k in [1, 4]:
          x_shape = batch_shape + (k,)
          x = self._rng.randn(*x_shape)
          # chol will not have the leading two dimensions.
          chol_shape = batch_shape[2:] + (k, k)
          chol = self._random_cholesky_array(chol_shape)
          operator = OperatorSolve(chol)
          qf = operator.inv_quadratic_form_on_vectors(x)
          numpy_qf = self._numpy_inv_quadratic_form_on_vectors(chol, x)
          self.assertEqual(batch_shape, qf.get_shape())
          self.assertAllClose(numpy_qf, qf.eval())
class FlipMatrixToVectorTest(tf.test.TestCase):
  """Tests operator_pd.flip_matrix_to_vector over matching, permuted,
  longer, and singleton-containing batch shapes; each case is run with
  both a known and an unknown static batch shape.
  """
  def setUp(self):
    self._rng = np.random.RandomState()
  def testMatrixAndVectorBatchShapesTheSame(self):
    batch_shape = [6, 2, 3]
    for static_batch_shape in [
        tf.TensorShape(batch_shape), tf.TensorShape(None)]:
      with self.test_session():
        mat = self._rng.rand(2, 3, 4, 6)
        vec = operator_pd.flip_matrix_to_vector(
            mat, batch_shape, static_batch_shape)
        vec_v = vec.eval()
        self.assertAllEqual((6, 2, 3, 4), vec_v.shape)
        # Spot-check that an element landed where the flip says it should.
        self.assertAllEqual(mat[1, 2, 3, 4], vec_v[4, 1, 2, 3])
  def testMatrixAndVectorBatchShapesSameRankButPermuted(self):
    batch_shape = [6, 3, 2]
    for static_batch_shape in [
        tf.TensorShape(batch_shape), tf.TensorShape(None)]:
      with self.test_session():
        mat = self._rng.rand(2, 3, 4, 6)
        vec = operator_pd.flip_matrix_to_vector(
            mat, batch_shape, static_batch_shape)
        vec_v = vec.eval()
        self.assertAllEqual((6, 3, 2, 4), vec_v.shape)
  def testVectorBatchShapeLongerThanMatrixBatchShape(self):
    batch_shape = [2, 3, 2, 3]
    for static_batch_shape in [
        tf.TensorShape(batch_shape), tf.TensorShape(None)]:
      with self.test_session():
        mat = self._rng.rand(2, 3, 4, 6)
        vec = operator_pd.flip_matrix_to_vector(
            mat, batch_shape, static_batch_shape)
        vec_v = vec.eval()
        self.assertAllEqual((2, 3, 2, 3, 4), vec_v.shape)
  def testMatrixBatchShapeHasASingletonThatVecBatchShapeDoesnt(self):
    batch_shape = [6, 3]
    for static_batch_shape in [
        tf.TensorShape(batch_shape), tf.TensorShape(None)]:
      with self.test_session():
        mat = self._rng.rand(1, 3, 4, 6)
        vec = operator_pd.flip_matrix_to_vector(
            mat, batch_shape, static_batch_shape)
        vec_v = vec.eval()
        self.assertAllEqual((6, 3, 4), vec_v.shape)
        # Spot-check that an element landed where the flip says it should.
        self.assertAllEqual(mat[0, 2, 3, 4], vec_v[4, 2, 3])
class FlipVectorToMatrixTest(tf.test.TestCase):
  """Tests operator_pd.flip_vector_to_matrix for x batch ranks equal to,
  one greater, and two greater than the target batch shape, with both
  known and unknown static batch shapes.
  """
  def setUp(self):
    self._rng = np.random.RandomState()
  def testWhenXBatchRankIsSameAsBatchRankArg(self):
    batch_shape = [4, 5]
    x = self._rng.rand(4, 5, 6)
    for static_batch_shape in [
        tf.TensorShape(batch_shape), tf.TensorShape(None)]:
      with self.test_session():
        mat = operator_pd.flip_vector_to_matrix(
            x, batch_shape, static_batch_shape)
        mat_v = mat.eval()
        # Equal ranks: only a trailing singleton column dim is appended.
        expected_mat_v = x.reshape(x.shape + (1,))
        self.assertAllEqual(expected_mat_v, mat_v)
  def testWhenXHasOneLargerLargerBatchRankThanBatchRankArg(self):
    batch_shape = [4, 5]
    x = self._rng.rand(3, 4, 5, 6)
    for static_batch_shape in [
        tf.TensorShape(batch_shape), tf.TensorShape(None)]:
      with self.test_session():
        mat = operator_pd.flip_vector_to_matrix(
            x, batch_shape, static_batch_shape)
        mat_v = mat.eval()
        self.assertAllEqual((4, 5, 6, 3), mat_v.shape)
        # Spot-check that an element landed where the flip says it should.
        self.assertAllEqual(x[2, 2, 2, 1], mat_v[2, 2, 1, 2])
  def testWhenBatchShapeRequiresReshapeOfVectorBatchShape(self):
    batch_shape = [5, 4]
    x = self._rng.rand(3, 4, 5, 6)  # Note x has (4,5) and batch_shape is (5, 4)
    for static_batch_shape in [
        tf.TensorShape(batch_shape), tf.TensorShape(None)]:
      with self.test_session():
        mat = operator_pd.flip_vector_to_matrix(
            x, batch_shape, static_batch_shape)
        mat_v = mat.eval()
        self.assertAllEqual((5, 4, 6, 3), mat_v.shape)
  def testWhenXHasTwoLargerLargerBatchRankThanBatchRankArg(self):
    batch_shape = [4, 5]
    x = self._rng.rand(2, 3, 4, 5, 6)
    for static_batch_shape in [
        tf.TensorShape(batch_shape), tf.TensorShape(None)]:
      with self.test_session():
        mat = operator_pd.flip_vector_to_matrix(
            x, batch_shape, static_batch_shape)
        mat_v = mat.eval()
        # The two extra leading dims (2, 3) collapse into one column dim.
        self.assertAllEqual((4, 5, 6, 2*3), mat_v.shape)
class ExtractBatchShapeTest(tf.test.TestCase):
  """Tests operator_pd.extract_batch_shape: everything but the trailing
  num_event_dims dimensions is the batch shape.
  """
  def setUp(self):
    self._rng = np.random.RandomState()
  def testXHasEmptyBatchShape(self):
    with self.test_session():
      x = self._rng.rand(2, 3)
      num_event_dims = 2
      batch_shape = operator_pd.extract_batch_shape(x, num_event_dims)
      self.assertAllEqual([], batch_shape.eval())
  def testXHasNonEmptyBatchShape(self):
    with self.test_session():
      x = self._rng.rand(2, 3, 4, 5)
      num_event_dims = 2
      batch_shape = operator_pd.extract_batch_shape(x, num_event_dims)
      self.assertAllEqual([2, 3], batch_shape.eval())
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
wubenqi/protoc-gen-luabind | protoc-gen-luabind/google/protobuf/internal/decoder.py | 223 | 26136 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Code for decoding protocol buffer primitives.
This code is very similar to encoder.py -- read the docs for that module first.
A "decoder" is a function with the signature:
Decode(buffer, pos, end, message, field_dict)
The arguments are:
buffer: The string containing the encoded message.
pos: The current position in the string.
end: The position in the string where the current message ends. May be
less than len(buffer) if we're reading a sub-message.
message: The message object into which we're parsing.
field_dict: message._fields (avoids a hashtable lookup).
The decoder reads the field and stores it into field_dict, returning the new
buffer position. A decoder for a repeated field may proactively decode all of
the elements of that field, if they appear consecutively.
Note that decoders may throw any of the following:
IndexError: Indicates a truncated message.
struct.error: Unpacking of a fixed-width field failed.
message.DecodeError: Other errors.
Decoders are expected to raise an exception if they are called with pos > end.
This allows callers to be lax about bounds checking: it's fine to read past
"end" as long as you are sure that someone else will notice and throw an
exception later on.
Something up the call stack is expected to catch IndexError and struct.error
and convert them to message.DecodeError.
Decoders are constructed using decoder constructors with the signature:
MakeDecoder(field_number, is_repeated, is_packed, key, new_default)
The arguments are:
field_number: The field number of the field we want to decode.
is_repeated: Is the field a repeated field? (bool)
is_packed: Is the field a packed field? (bool)
key: The key to use when looking up the field within field_dict.
(This is actually the FieldDescriptor but nothing in this
file should depend on that.)
new_default: A function which takes a message object as a parameter and
returns a new instance of the default value for this field.
(This is called for repeated fields and sub-messages, when an
instance does not already exist.)
As with encoders, we define a decoder constructor for every type of field.
Then, for every field of every message class we construct an actual decoder.
That decoder goes into a dict indexed by tag, so when we decode a message
we repeatedly read a tag, look up the corresponding decoder, and invoke it.
"""
__author__ = 'kenton@google.com (Kenton Varda)'
import struct
from google.protobuf.internal import encoder
from google.protobuf.internal import wire_format
from google.protobuf import message
# This will overflow and thus become IEEE-754 "infinity". We would use
# "float('inf')" but it doesn't work on Windows pre-Python-2.6.
_POS_INF = 1e10000
_NEG_INF = -_POS_INF
# inf * 0 yields a quiet NaN.
_NAN = _POS_INF * 0
# This is not for optimization, but rather to avoid conflicts with local
# variables named "message".
_DecodeError = message.DecodeError
def _VarintDecoder(mask):
"""Return an encoder for a basic varint value (does not include tag).
Decoded values will be bitwise-anded with the given mask before being
returned, e.g. to limit them to 32 bits. The returned decoder does not
take the usual "end" parameter -- the caller is expected to do bounds checking
after the fact (often the caller can defer such checking until later). The
decoder returns a (value, new_pos) pair.
"""
local_ord = ord
def DecodeVarint(buffer, pos):
result = 0
shift = 0
while 1:
b = local_ord(buffer[pos])
result |= ((b & 0x7f) << shift)
pos += 1
if not (b & 0x80):
result &= mask
return (result, pos)
shift += 7
if shift >= 64:
raise _DecodeError('Too many bytes when decoding varint.')
return DecodeVarint
def _SignedVarintDecoder(mask):
"""Like _VarintDecoder() but decodes signed values."""
local_ord = ord
def DecodeVarint(buffer, pos):
result = 0
shift = 0
while 1:
b = local_ord(buffer[pos])
result |= ((b & 0x7f) << shift)
pos += 1
if not (b & 0x80):
if result > 0x7fffffffffffffff:
result -= (1 << 64)
result |= ~mask
else:
result &= mask
return (result, pos)
shift += 7
if shift >= 64:
raise _DecodeError('Too many bytes when decoding varint.')
return DecodeVarint
# Default decoders cover the full 64-bit varint range.
_DecodeVarint = _VarintDecoder((1 << 64) - 1)
_DecodeSignedVarint = _SignedVarintDecoder((1 << 64) - 1)
# Use these versions for values which must be limited to 32 bits.
_DecodeVarint32 = _VarintDecoder((1 << 32) - 1)
_DecodeSignedVarint32 = _SignedVarintDecoder((1 << 32) - 1)
def ReadTag(buffer, pos):
  """Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple.

  The tag's raw bytes are returned undecoded: they can be used directly as
  a key into a decoder table, which in Python is cheaper than decoding the
  varint first and dispatching on the number (a C-level hash lookup instead
  of Python-level arithmetic).
  """
  # A tag is a varint: skip every byte with the continuation bit set,
  # then include the final byte.
  end = pos
  while ord(buffer[end]) & 0x80:
    end += 1
  end += 1
  return (buffer[pos:end], end)
# --------------------------------------------------------------------
def _SimpleDecoder(wire_type, decode_value):
  """Return a constructor for a decoder for fields of a particular type.

  Args:
      wire_type:  The field's wire type.
      decode_value:  A function which decodes an individual value, e.g.
        _DecodeVarint()
  """
  def SpecificDecoder(field_number, is_repeated, is_packed, key, new_default):
    if is_packed:
      local_DecodeVarint = _DecodeVarint
      def DecodePackedField(buffer, pos, end, message, field_dict):
        # Packed encoding: a single length-prefixed blob of values.
        value = field_dict.get(key)
        if value is None:
          value = field_dict.setdefault(key, new_default(message))
        (endpoint, pos) = local_DecodeVarint(buffer, pos)
        endpoint += pos
        if endpoint > end:
          raise _DecodeError('Truncated message.')
        while pos < endpoint:
          (element, pos) = decode_value(buffer, pos)
          value.append(element)
        if pos > endpoint:
          # The last element read past the declared length.
          del value[-1]   # Discard corrupt value.
          raise _DecodeError('Packed element was truncated.')
        return pos
      return DecodePackedField
    elif is_repeated:
      tag_bytes = encoder.TagBytes(field_number, wire_type)
      tag_len = len(tag_bytes)
      def DecodeRepeatedField(buffer, pos, end, message, field_dict):
        value = field_dict.get(key)
        if value is None:
          value = field_dict.setdefault(key, new_default(message))
        while 1:
          (element, new_pos) = decode_value(buffer, pos)
          value.append(element)
          # Predict that the next tag is another copy of the same repeated
          # field.
          pos = new_pos + tag_len
          if buffer[new_pos:pos] != tag_bytes or new_pos >= end:
            # Prediction failed.  Return.
            if new_pos > end:
              raise _DecodeError('Truncated message.')
            return new_pos
      return DecodeRepeatedField
    else:
      def DecodeField(buffer, pos, end, message, field_dict):
        (field_dict[key], pos) = decode_value(buffer, pos)
        if pos > end:
          del field_dict[key]  # Discard corrupt value.
          raise _DecodeError('Truncated message.')
        return pos
      return DecodeField
  return SpecificDecoder
def _ModifiedDecoder(wire_type, decode_value, modify_value):
  """Like _SimpleDecoder but runs modify_value over every decoded value
  before it is stored (typically modify_value is ZigZagDecode).
  """
  # Delegating to _SimpleDecoder is marginally slower than duplicating its
  # body here, but not by enough to matter.
  def InnerDecode(buffer, pos):
    value, new_pos = decode_value(buffer, pos)
    return (modify_value(value), new_pos)
  return _SimpleDecoder(wire_type, InnerDecode)
def _StructPackDecoder(wire_type, format):
  """Return a constructor for a decoder for a fixed-width field.

  Args:
    wire_type: The field's wire type.
    format: The format string to pass to struct.unpack().
  """

  value_size = struct.calcsize(format)
  local_unpack = struct.unpack

  # Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but
  # not enough to make a significant difference.

  # Note that we expect someone up-stack to catch struct.error and convert
  # it to _DecodeError -- this way we don't have to set up exception-
  # handling blocks every time we parse one value.

  def InnerDecode(buffer, pos):
    new_pos = pos + value_size
    result = local_unpack(format, buffer[pos:new_pos])[0]
    return (result, new_pos)
  return _SimpleDecoder(wire_type, InnerDecode)
def _FloatDecoder():
  """Returns a decoder for a float field.

  This code works around a bug in struct.unpack for non-finite 32-bit
  floating-point values.
  """

  local_unpack = struct.unpack

  def InnerDecode(buffer, pos):
    # We expect a 32-bit value in little-endian byte order.  Bit 1 is the sign
    # bit, bits 2-9 represent the exponent, and bits 10-32 are the significand.
    new_pos = pos + 4
    float_bytes = buffer[pos:new_pos]

    # If this value has all its exponent bits set, then it's non-finite.
    # In Python 2.4, struct.unpack will convert it to a finite 64-bit value.
    # To avoid that, we parse it specially.
    # NOTE: the byte-string comparisons below rely on Python 2 str slicing
    # semantics (each element is a one-character str).
    if ((float_bytes[3] in '\x7F\xFF')
        and (float_bytes[2] >= '\x80')):
      # If at least one significand bit is set...
      if float_bytes[0:3] != '\x00\x00\x80':
        return (_NAN, new_pos)
      # If sign bit is set...
      if float_bytes[3] == '\xFF':
        return (_NEG_INF, new_pos)
      return (_POS_INF, new_pos)

    # Note that we expect someone up-stack to catch struct.error and convert
    # it to _DecodeError -- this way we don't have to set up exception-
    # handling blocks every time we parse one value.
    result = local_unpack('<f', float_bytes)[0]
    return (result, new_pos)
  return _SimpleDecoder(wire_format.WIRETYPE_FIXED32, InnerDecode)
def _DoubleDecoder():
  """Returns a decoder for a double field.

  This code works around a bug in struct.unpack for not-a-number.
  """

  local_unpack = struct.unpack

  def InnerDecode(buffer, pos):
    # We expect a 64-bit value in little-endian byte order.  Bit 1 is the sign
    # bit, bits 2-12 represent the exponent, and bits 13-64 are the significand.
    new_pos = pos + 8
    double_bytes = buffer[pos:new_pos]

    # If this value has all its exponent bits set and at least one significand
    # bit set, it's not a number.  In Python 2.4, struct.unpack will treat it
    # as inf or -inf.  To avoid that, we treat it specially.
    if ((double_bytes[7] in '\x7F\xFF')
        and (double_bytes[6] >= '\xF0')
        and (double_bytes[0:7] != '\x00\x00\x00\x00\x00\x00\xF0')):
      return (_NAN, new_pos)

    # Note that we expect someone up-stack to catch struct.error and convert
    # it to _DecodeError -- this way we don't have to set up exception-
    # handling blocks every time we parse one value.
    result = local_unpack('<d', double_bytes)[0]
    return (result, new_pos)
  return _SimpleDecoder(wire_format.WIRETYPE_FIXED64, InnerDecode)
# --------------------------------------------------------------------
# Concrete per-type decoder constructors, built from the generic helpers
# above.  Each of these is a function of
# (field_number, is_repeated, is_packed, key, new_default).

Int32Decoder = EnumDecoder = _SimpleDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeSignedVarint32)

Int64Decoder = _SimpleDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeSignedVarint)

UInt32Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint32)
UInt64Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint)

SInt32Decoder = _ModifiedDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeVarint32, wire_format.ZigZagDecode)
SInt64Decoder = _ModifiedDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeVarint, wire_format.ZigZagDecode)

# Note that Python conveniently guarantees that when using the '<' prefix on
# formats, they will also have the same size across all platforms (as opposed
# to without the prefix, where their sizes depend on the C compiler's basic
# type sizes).
Fixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<I')
Fixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<Q')
SFixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<i')
SFixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<q')
FloatDecoder = _FloatDecoder()
DoubleDecoder = _DoubleDecoder()

BoolDecoder = _ModifiedDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeVarint, bool)
def StringDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for a string field.

  Decoded bytes are converted to unicode with the 'utf-8' codec before being
  stored.  Strings are never packed, hence the assertion.
  """

  local_DecodeVarint = _DecodeVarint
  local_unicode = unicode  # Python 2 builtin; bound locally for speed.

  assert not is_packed
  if is_repeated:
    tag_bytes = encoder.TagBytes(field_number,
                                 wire_format.WIRETYPE_LENGTH_DELIMITED)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while 1:
        (size, pos) = local_DecodeVarint(buffer, pos)
        new_pos = pos + size
        if new_pos > end:
          raise _DecodeError('Truncated string.')
        value.append(local_unicode(buffer[pos:new_pos], 'utf-8'))
        # Predict that the next tag is another copy of the same repeated field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
          # Prediction failed.  Return.
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      (size, pos) = local_DecodeVarint(buffer, pos)
      new_pos = pos + size
      if new_pos > end:
        raise _DecodeError('Truncated string.')
      field_dict[key] = local_unicode(buffer[pos:new_pos], 'utf-8')
      return new_pos
    return DecodeField
def BytesDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for a bytes field.

  Raw byte slices are stored verbatim (no decoding).  Bytes fields are never
  packed, hence the assertion.
  """

  local_DecodeVarint = _DecodeVarint

  assert not is_packed
  if is_repeated:
    tag_bytes = encoder.TagBytes(field_number,
                                 wire_format.WIRETYPE_LENGTH_DELIMITED)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while 1:
        (size, pos) = local_DecodeVarint(buffer, pos)
        new_pos = pos + size
        if new_pos > end:
          raise _DecodeError('Truncated string.')
        value.append(buffer[pos:new_pos])
        # Predict that the next tag is another copy of the same repeated field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
          # Prediction failed.  Return.
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      (size, pos) = local_DecodeVarint(buffer, pos)
      new_pos = pos + size
      if new_pos > end:
        raise _DecodeError('Truncated string.')
      field_dict[key] = buffer[pos:new_pos]
      return new_pos
    return DecodeField
def GroupDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for a group field.

  A group is a nested message delimited by START_GROUP/END_GROUP tags instead
  of a length prefix.  Groups are never packed, hence the assertion.
  """

  end_tag_bytes = encoder.TagBytes(field_number,
                                   wire_format.WIRETYPE_END_GROUP)
  end_tag_len = len(end_tag_bytes)

  assert not is_packed
  if is_repeated:
    tag_bytes = encoder.TagBytes(field_number,
                                 wire_format.WIRETYPE_START_GROUP)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      # NOTE: the original re-fetched `value` from field_dict on every loop
      # iteration; nothing in the loop replaces field_dict[key], so the
      # binding above is the same object and the redundant lookup is dropped.
      while 1:
        # Read sub-message.
        pos = value.add()._InternalParse(buffer, pos, end)
        # Read end tag.
        new_pos = pos + end_tag_len
        if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
          raise _DecodeError('Missing group end tag.')
        # Predict that the next tag is another copy of the same repeated field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
          # Prediction failed.  Return.
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      # Read sub-message.
      pos = value._InternalParse(buffer, pos, end)
      # Read end tag.
      new_pos = pos + end_tag_len
      if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
        raise _DecodeError('Missing group end tag.')
      return new_pos
    return DecodeField
def MessageDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for a (length-delimited) message field.

  Messages are never packed, hence the assertion.
  """

  local_DecodeVarint = _DecodeVarint

  assert not is_packed
  if is_repeated:
    tag_bytes = encoder.TagBytes(field_number,
                                 wire_format.WIRETYPE_LENGTH_DELIMITED)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      # NOTE: the original re-fetched `value` from field_dict on every loop
      # iteration; nothing in the loop replaces field_dict[key], so the
      # binding above is the same object and the redundant lookup is dropped.
      while 1:
        # Read length.
        (size, pos) = local_DecodeVarint(buffer, pos)
        new_pos = pos + size
        if new_pos > end:
          raise _DecodeError('Truncated message.')
        # Read sub-message.
        if value.add()._InternalParse(buffer, pos, new_pos) != new_pos:
          # The only reason _InternalParse would return early is if it
          # encountered an end-group tag.
          raise _DecodeError('Unexpected end-group tag.')
        # Predict that the next tag is another copy of the same repeated field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
          # Prediction failed.  Return.
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      # Read length.
      (size, pos) = local_DecodeVarint(buffer, pos)
      new_pos = pos + size
      if new_pos > end:
        raise _DecodeError('Truncated message.')
      # Read sub-message.
      if value._InternalParse(buffer, pos, new_pos) != new_pos:
        # The only reason _InternalParse would return early is if it encountered
        # an end-group tag.
        raise _DecodeError('Unexpected end-group tag.')
      return new_pos
    return DecodeField
# --------------------------------------------------------------------

MESSAGE_SET_ITEM_TAG = encoder.TagBytes(1, wire_format.WIRETYPE_START_GROUP)


def MessageSetItemDecoder(extensions_by_number):
  """Returns a decoder for a MessageSet item.

  The parameter is the _extensions_by_number map for the message class.

  The message set message looks like this:
    message MessageSet {
      repeated group Item = 1 {
        required int32 type_id = 2;
        required string message = 3;
      }
    }
  """

  type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT)
  message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)
  item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP)

  local_ReadTag = ReadTag
  local_DecodeVarint = _DecodeVarint
  local_SkipField = SkipField

  def DecodeItem(buffer, pos, end, message, field_dict):
    message_set_item_start = pos
    type_id = -1
    message_start = -1
    message_end = -1

    # Technically, type_id and message can appear in any order, so we need
    # a little loop here.
    while 1:
      (tag_bytes, pos) = local_ReadTag(buffer, pos)
      if tag_bytes == type_id_tag_bytes:
        (type_id, pos) = local_DecodeVarint(buffer, pos)
      elif tag_bytes == message_tag_bytes:
        (size, message_start) = local_DecodeVarint(buffer, pos)
        pos = message_end = message_start + size
      elif tag_bytes == item_end_tag_bytes:
        break
      else:
        # The original called the global SkipField here, leaving the
        # local_SkipField binding unused; use the local as intended.
        pos = local_SkipField(buffer, pos, end, tag_bytes)
        if pos == -1:
          raise _DecodeError('Missing group end tag.')

    if pos > end:
      raise _DecodeError('Truncated message.')

    if type_id == -1:
      raise _DecodeError('MessageSet item missing type_id.')
    if message_start == -1:
      raise _DecodeError('MessageSet item missing message.')

    extension = extensions_by_number.get(type_id)
    if extension is not None:
      value = field_dict.get(extension)
      if value is None:
        value = field_dict.setdefault(
            extension, extension.message_type._concrete_class())
      if value._InternalParse(buffer, message_start, message_end) != message_end:
        # The only reason _InternalParse would return early is if it encountered
        # an end-group tag.
        raise _DecodeError('Unexpected end-group tag.')
    else:
      # Unknown type_id: preserve the raw item bytes as an unknown field.
      if not message._unknown_fields:
        message._unknown_fields = []
      message._unknown_fields.append((MESSAGE_SET_ITEM_TAG,
                                      buffer[message_set_item_start:pos]))

    return pos

  return DecodeItem
# --------------------------------------------------------------------
# Optimization is not as heavy here because calls to SkipField() are rare,
# except for handling end-group tags.


def _SkipVarint(buffer, pos, end):
  """Skip a varint value.  Returns the new position."""
  # Each varint byte except the last has its high bit set.
  while ord(buffer[pos]) & 0x80:
    pos += 1
  pos += 1
  if pos > end:
    raise _DecodeError('Truncated message.')
  return pos


def _SkipFixed64(buffer, pos, end):
  """Skip a fixed64 value.  Returns the new position."""
  pos += 8
  if pos > end:
    raise _DecodeError('Truncated message.')
  return pos


def _SkipLengthDelimited(buffer, pos, end):
  """Skip a length-delimited value.  Returns the new position."""
  (size, pos) = _DecodeVarint(buffer, pos)
  pos += size
  if pos > end:
    raise _DecodeError('Truncated message.')
  return pos


def _SkipGroup(buffer, pos, end):
  """Skip sub-group.  Returns the new position."""
  while 1:
    (tag_bytes, pos) = ReadTag(buffer, pos)
    new_pos = SkipField(buffer, pos, end, tag_bytes)
    if new_pos == -1:
      # SkipField returns -1 for an end-group tag: the group is done.
      return pos
    pos = new_pos


def _EndGroup(buffer, pos, end):
  """Skipping an END_GROUP tag returns -1 to tell the parent loop to break."""
  return -1


def _SkipFixed32(buffer, pos, end):
  """Skip a fixed32 value.  Returns the new position."""
  pos += 4
  if pos > end:
    raise _DecodeError('Truncated message.')
  return pos


def _RaiseInvalidWireType(buffer, pos, end):
  """Skip function for unknown wire types.  Raises an exception."""
  raise _DecodeError('Tag had invalid wire type.')
def _FieldSkipper():
  """Constructs the SkipField function."""

  # Indexed by wire type (0-7); entries 6 and 7 are unassigned wire types.
  WIRETYPE_TO_SKIPPER = [
      _SkipVarint,
      _SkipFixed64,
      _SkipLengthDelimited,
      _SkipGroup,
      _EndGroup,
      _SkipFixed32,
      _RaiseInvalidWireType,
      _RaiseInvalidWireType,
      ]

  wiretype_mask = wire_format.TAG_TYPE_MASK
  local_ord = ord

  def SkipField(buffer, pos, end, tag_bytes):
    """Skips a field with the specified tag.

    |pos| should point to the byte immediately after the tag.

    Returns:
        The new position (after the tag value), or -1 if the tag is an end-group
        tag (in which case the calling loop should break).
    """

    # The wire type is always in the first byte since varints are little-endian.
    wire_type = local_ord(tag_bytes[0]) & wiretype_mask
    return WIRETYPE_TO_SKIPPER[wire_type](buffer, pos, end)

  return SkipField


SkipField = _FieldSkipper()
| apache-2.0 |
arruda/rmr | rmr/apps/accounts/migrations/0001_initial.py | 1 | 4385 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: creates/drops the accounts_userprofile table."""

    def forwards(self, orm):
        # Adding model 'UserProfile'
        db.create_table('accounts_userprofile', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('quota', self.gf('django.db.models.fields.DecimalField')(default='0', null=True, max_digits=10, decimal_places=2, blank=True)),
        ))
        db.send_create_signal('accounts', ['UserProfile'])

    def backwards(self, orm):
        # Deleting model 'UserProfile'
        db.delete_table('accounts_userprofile')

    # Frozen ORM state (auto-generated by South); used to reconstruct the
    # model definitions as they existed at migration time.  Do not edit.
    models = {
        'accounts.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'quota': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['accounts']
sigma-random/scrapy | scrapy/utils/testproc.py | 39 | 1465 | import sys
import os
from twisted.internet import reactor, defer, protocol
class ProcessTest(object):
    """Mixin for trial tests that spawn a `scrapy` subcommand as a subprocess.

    Subclasses set `command` to the scrapy subcommand to run; `execute()`
    returns a Deferred that fires with (exitcode, stdout, stderr).
    """

    command = None
    prefix = [sys.executable, '-m', 'scrapy.cmdline']
    cwd = os.getcwd()  # trial chdirs to temp dir

    def execute(self, args, check_code=True, settings=None):
        env = os.environ.copy()
        if settings is not None:
            env['SCRAPY_SETTINGS_MODULE'] = settings
        cmd = self.prefix + [self.command] + list(args)
        pp = TestProcessProtocol()
        pp.deferred.addBoth(self._process_finished, cmd, check_code)
        reactor.spawnProcess(pp, cmd[0], cmd, env=env, path=self.cwd)
        return pp.deferred

    def _process_finished(self, pp, cmd, check_code):
        # Raise with the captured output if the process failed and the caller
        # asked for exit-code checking.
        if pp.exitcode and check_code:
            msg = "process %s exit with code %d" % (cmd, pp.exitcode)
            msg += "\n>>> stdout <<<\n%s" % pp.out
            msg += "\n"
            msg += "\n>>> stderr <<<\n%s" % pp.err
            raise RuntimeError(msg)
        return pp.exitcode, pp.out, pp.err
class TestProcessProtocol(protocol.ProcessProtocol):
    """Collects stdout, stderr and the exit code of a spawned process.

    `deferred` fires with this protocol instance once the process ends.
    """

    def __init__(self):
        self.deferred = defer.Deferred()
        self.out = ''
        self.err = ''
        self.exitcode = None

    def outReceived(self, data):
        self.out += data

    def errReceived(self, data):
        self.err += data

    def processEnded(self, status):
        self.exitcode = status.value.exitCode
        self.deferred.callback(self)
| bsd-3-clause |
xiandiancloud/edx-platform-Y | common/djangoapps/student/tests/test_microsite.py | 15 | 3704 | """
Test for User Creation from Micro-Sites
"""
from django.test import TestCase
from student.models import UserSignupSource
import mock
import json
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
# Canned microsite configuration used by the fakes below.
FAKE_MICROSITE = {
    "SITE_NAME": "openedx.localhost",
    "REGISTRATION_EXTRA_FIELDS": {
        "address1": "required",
        "city": "required",
        "state": "required",
        "country": "required",
        "company": "required",
        "title": "required"
    },
    "extended_profile_fields": [
        "address1", "state", "company", "title"
    ]
}


def fake_site_name(name, default=None):  # pylint: disable=W0613
    """
    Fake microsite getter that only knows the site name.

    Returns 'openedx.localhost' for 'SITE_NAME' and `default` otherwise.
    """
    if name == 'SITE_NAME':
        return 'openedx.localhost'
    return default


def fake_microsite_get_value(name, default=None):  # pylint: disable=W0613
    """
    Fake microsite getter backed by the FAKE_MICROSITE dict above.
    """
    return FAKE_MICROSITE.get(name, default)
class TestMicrosite(TestCase):
    """Test for Account Creation from a white labeled Micro-Sites"""

    def setUp(self):
        self.username = "test_user"
        self.url = reverse("create_account")
        self.params = {
            "username": self.username,
            "email": "test@example.org",
            "password": "testpass",
            "name": "Test User",
            "honor_code": "true",
            "terms_of_service": "true",
        }
        # Python 2 idiom: concatenate the .items() lists to merge the base
        # params with the microsite-specific profile fields.
        self.extended_params = dict(self.params.items() + {
            "address1": "foo",
            "city": "foo",
            "state": "foo",
            "country": "foo",
            "company": "foo",
            "title": "foo"
        }.items())

    @mock.patch("microsite_configuration.microsite.get_value", fake_site_name)
    def test_user_signup_source(self):
        """
        test to create a user from the microsite and see that its record has
        been saved in the UserSignupSource Table
        """
        response = self.client.post(self.url, self.params)
        self.assertEqual(response.status_code, 200)
        self.assertGreater(len(UserSignupSource.objects.filter(site='openedx.localhost')), 0)

    def test_user_signup_from_non_micro_site(self):
        """
        test to create a user from the non-microsite. The record should not be
        saved in the UserSignupSource Table
        """
        response = self.client.post(self.url, self.params)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(UserSignupSource.objects.filter(site='openedx.localhost')), 0)

    @mock.patch("microsite_configuration.microsite.get_value", fake_microsite_get_value)
    def test_user_signup_missing_enhanced_profile(self):
        """
        test to create a user from the microsite but don't provide any of the
        microsite specific profile information
        """
        response = self.client.post(self.url, self.params)
        self.assertEqual(response.status_code, 400)

    @mock.patch("microsite_configuration.microsite.get_value", fake_microsite_get_value)
    def test_user_signup_including_enhanced_profile(self):
        """
        test to create a user from the microsite and provide all of the
        microsite specific profile information
        """
        response = self.client.post(self.url, self.extended_params)
        self.assertEqual(response.status_code, 200)
        user = User.objects.get(username=self.username)
        meta = json.loads(user.profile.meta)
        self.assertEqual(meta['address1'], 'foo')
        self.assertEqual(meta['state'], 'foo')
        self.assertEqual(meta['company'], 'foo')
        self.assertEqual(meta['title'], 'foo')
| agpl-3.0 |
rosterloh/CMSIS-DAP | tools/get_binary.py | 21 | 1166 | """
CMSIS-DAP Interface Firmware
Copyright (c) 2009-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Extract and patch the interface without bootloader
"""
from options import get_options
from paths import get_interface_path, TMP_DIR
from utils import gen_binary, is_lpc, split_path
from os.path import join
if __name__ == '__main__':
    options = get_options()

    # Locate the interface ELF for the selected target (without bootloader).
    in_path = get_interface_path(options.interface, options.target, bootloader=False)
    _, name, _ = split_path(in_path)
    out_path = join(TMP_DIR, name + '.bin')

    # Paren form works identically under Python 2 and 3 for a single argument.
    print('\nELF: %s' % in_path)
    gen_binary(in_path, out_path, is_lpc(options.interface))
    print("\nBINARY: %s" % out_path)
hip-odoo/odoo | addons/survey/tests/test_survey.py | 22 | 17884 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import random
import re
from collections import Counter
from itertools import product
from urlparse import urljoin
from odoo import _
from odoo.exceptions import UserError
from odoo.tests.common import TransactionCase
from odoo.addons.website.models.website import slug
class TestSurvey(TransactionCase):
def setUp(self):
super(TestSurvey, self).setUp()
User = self.env['res.users'].with_context({'no_reset_password': True})
(group_survey_user, group_employee) = (self.ref('survey.group_survey_user'), self.ref('base.group_user'))
self.survey_manager = User.create({
'name': 'Gustave Doré', 'login': 'Gustav','email': 'gustav.dore@example.com',
'groups_id': [(6, 0, [self.ref('survey.group_survey_manager'), group_survey_user, group_employee])]})
self.survey_user = User.create({
'name': 'Lukas Peeters', 'login': 'Lukas', 'email': 'lukas.petters@example.com',
'groups_id': [(6, 0, [group_survey_user, group_employee])]})
self.user_public = User.create({
'name': 'Wout Janssens', 'login': 'Wout', 'email': 'wout.janssens@example.com',
'groups_id': [(6, 0, [self.ref('base.group_public')])]})
self.survey1 = self.env['survey.survey'].sudo(self.survey_manager).create({'title': "S0", 'page_ids': [(0, 0, {'title': "P0"})]})
self.page1 = self.survey1.page_ids[0]
def test_00_create_minimal_survey(self):
question = self.env['survey.question'].sudo(self.survey_manager).create({'page_id': self.page1.id, 'question': 'Q0'})
self.assertEqual(self.survey1.title, "S0", msg="Title of the survey is somehow modified.")
self.assertEqual(len(self.survey1.page_ids), 1, msg="Additional Pages are linked with the survey after creation.")
self.assertEqual(self.page1.title, "P0", msg="Title of the page is somehow modified.")
self.assertEqual(len(self.page1.question_ids), 1, msg="Additional questions are linked with the page after creation.")
self.assertEqual(question.question, "Q0", msg="Title of the Question is somehow modified.")
def test_01_question_type_validation_save_line_function(self):
for (question_type, text) in self.env['survey.question']._fields['type'].selection:
# Each question ype must have validation function.
self.assertTrue(hasattr(self.env['survey.question'], 'validate_' + question_type), msg="Question must have a validation method in\
the form of 'validate_' followed by the name of the type.")
# Survey Input Lines must have validation function for each line.
self.assertTrue(hasattr(self.env['survey.user_input_line'], 'save_line_' + question_type), msg="Inputline must have Save method in \
the form of 'save_line_' followed by the name of the type.")
def test_02_question_answer_required(self):
for (question_type, text) in self.env['survey.question']._fields['type'].selection:
# Blank value of field is not accepted for mandatory questions.
if question_type == 'multiple_choice':
question = self.env['survey.question'].sudo(self.survey_manager).create({
'page_id': self.page1.id, 'question': 'Q0', 'type': 'multiple_choice',
'constr_mandatory': True, 'constr_error_msg': 'Error',
'labels_ids': [(0, 0, {'value': "MChoice0", "quizz_mark": 0}), (0, 0, {'value': "MChoice1", "quizz_mark": 0})]})
elif question_type == 'matrix':
question = self.env['survey.question'].sudo(self.survey_manager).create({
'page_id': self.page1.id, 'question': 'Q0', 'type': 'matrix', 'matrix_subtype': 'simple',
'constr_mandatory': True, 'constr_error_msg': 'Error',
'labels_ids': [(0, 0, {'value': "Column0", "quizz_mark": 0}), (0, 0, {'value': "Column1", "quizz_mark": 0})],
'labels_ids_2': [(0, 0, {'value': "Row0", "quizz_mark": 0}), (0, 0, {'value': "Row1", "quizz_mark": 0})]})
else:
question = self.env['survey.question'].sudo(self.survey_manager).create({
'page_id': self.page1.id, 'question': 'Q0', 'type': question_type, 'constr_mandatory': True, 'constr_error_msg': 'Error'})
answer_tag = '%s_%s_%s' % (self.survey1.id, self.page1.id, question.id)
self.assertDictEqual({answer_tag: "Error"}, question.validate_question({answer_tag: ''}, answer_tag),
msg=("Validation function for type %s is unable to generate error if it is mandatory and answer is blank." % question_type))
def test_03_question_textbox(self):
questions = [
self.env['survey.question'].sudo(self.survey_manager).create({
'page_id': self.page1.id, 'question': 'Q0', 'type': 'textbox', 'validation_email': True}),
self.env['survey.question'].sudo(self.survey_manager).create({
'page_id': self.page1.id, 'question': 'Q1', 'type': 'textbox', 'validation_required': True,
'validation_length_min': 2, 'validation_length_max': 8, 'validation_error_msg': "Error"})]
results = [('test @ testcom', _('This answer must be an email address')), ('t', 'Error')]
for i in range(len(questions)):
answer_tag = '%s_%s_%s' % (self.survey1.id, self.page1.id, questions[i].id)
self.assertEqual(questions[i].validate_question({answer_tag: results[i][0]}, answer_tag), {answer_tag: results[i][1]}, msg="\
Validation function for textbox is unable to notify if answer is violating the validation rules")
def test_04_question_numerical_box(self):
question = self.env['survey.question'].sudo(self.survey_manager).create({
'page_id': self.page1.id, 'question': 'Q0', 'type': 'numerical_box', 'validation_required': True,
'validation_min_float_value': 2.1, 'validation_max_float_value': 3.0, 'validation_error_msg': "Error"})
answer_tag = '%s_%s_%s' % (self.survey1.id, self.page1.id, question.id)
results = [('aaa', _('This is not a number')), ('4.5', 'Error'), ('0.1', 'Error')]
for i in range(len(results)):
self.assertEqual(question.validate_question({answer_tag: results[i][0]}, answer_tag), {answer_tag: results[i][1]}, msg="\
Validation function for type numerical_box is unable to notify if answer is violating the validation rules")
def test_05_question_datetime(self):
question = self.env['survey.question'].sudo(self.survey_manager).create({
'page_id': self.page1.id, 'question': 'Q0', 'type': 'datetime', 'validation_required': True,
'validation_min_date': '2015-03-20 00:00:00', 'validation_max_date': '2015-03-25 00:00:00', 'validation_error_msg': "Error"})
answer_tag = '%s_%s_%s' % (self.survey1.id, self.page1.id, question.id)
results = [('2015-55-10', _('This is not a date/time')), ('2015-03-19 00:00:00', 'Error'), ('2015-03-26 00:00:00', 'Error')]
for i in range(len(results)):
self.assertEqual(question.validate_question({answer_tag: results[i][0]}, answer_tag), {answer_tag: results[i][1]}, msg="\
Validation function for type datetime is unable to notify if answer is violating the validation rules")
def test_06_survey_sharing(self):
# Case-1: Executing action with correct data.
correct_survey = self.env['survey.survey'].sudo(self.survey_manager).create({
'title': "S0", 'stage_id': self.env['survey.stage'].search([('sequence', '=', 1)]).id,
'page_ids': [(0, 0, {'title': "P0", 'question_ids': [(0, 0, {'question': "Q0", 'type': 'free_text'})]})]})
action = correct_survey.action_send_survey()
template = self.env.ref('survey.email_template_survey', raise_if_not_found=False)
ctx = dict(
self.env.context,
default_model='survey.survey',
default_res_id=correct_survey.id,
default_survey_id=correct_survey.id,
default_use_template=bool(template),
default_template_id=template and template.id or False,
default_composition_mode='comment')
self.assertDictEqual(action, {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'survey.mail.compose.message',
'target': 'new',
'context': ctx,
})
# Case-2: Executing action with incorrect data.
surveys = [
self.env['survey.survey'].sudo(self.survey_manager).create({ # Survey without any page or question.
'title': "Test survey"}),
self.env['survey.survey'].sudo(self.survey_manager).create({ # Closed Survey.
'title': "S0", 'stage_id': self.env['survey.stage'].search([('closed', '=', True)]).id, # Getting Closed stage id.
'page_ids': [(0, 0, {'title': "P0", 'question_ids': [(0, 0, {'question': "Q0", 'type': 'free_text'})]})]})]
for survey in surveys:
self.assertRaises(UserError, survey.action_send_survey)
def test_07_survey_email_message(self):
    """send_mail must succeed when the composer has the __URL__ placeholder
    and at least one recipient, and raise UserError otherwise."""
    # Case 1: well-formed composer (placeholder + recipient) works.
    partner = self.env['res.partner'].create(
        {'name': 'Marie De Cock', 'email': 'marie.de.cock@gmail.com'})
    composer = self.env['survey.mail.compose.message'].sudo(self.survey_manager).create({
        'survey_id': self.survey1.id,
        'public': 'email_public_link',
        'body': '__URL__',
        'partner_ids': [(4, partner.id)]})
    composer.send_mail()
    # Case 2: missing placeholder or missing recipients must fail.
    invalid_composers = [
        # Mail message without __URL__ in the body.
        self.env['survey.mail.compose.message'].sudo(self.survey_manager).create({
            'survey_id': self.survey1.id, 'public': 'email_public_link'}),
        # Mail message without recipients.
        self.env['survey.mail.compose.message'].sudo(self.survey_manager).create({
            'survey_id': self.survey1.id, 'public': 'email_public_link',
            'body': "__URL__"})]
    for composer in invalid_composers:
        self.assertRaises(UserError, composer.send_mail)
def test_08_survey_urls(self):
    """Check that the public/print/result URLs of a survey are well formed,
    in both absolute and relative form, and that the public HTML link is
    rendered correctly."""
    def validate_url(url):
        """Return True if `url` looks like a valid http(s) URL.
        Reference: https://github.com/django/django/blob/master/django/core/validators.py"""
        url_regex = re.compile(
            r'^https?://' # http:// or https://
            r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
            r'localhost|' # localhost...
            r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # ...or ipv4
            r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # ...or ipv6
            r'(?::\d+)?' # optional port
            r'(?:/?|[/?]\S+)$', re.IGNORECASE)
        # bool() instead of the redundant "True if ... else False".
        return bool(url_regex.match(url))

    base_url = self.env['ir.config_parameter'].get_param('web.base.url')
    urltypes = {'public': 'start', 'print': 'print', 'result': 'results'}
    # .items() (not the Python-2-only .iteritems()) keeps this test
    # forward-compatible with Python 3; behaviour is identical on Python 2.
    for urltype, urltxt in urltypes.items():
        survey_url = getattr(self.survey1, urltype + '_url')
        survey_url_relative = getattr(self.survey1.with_context({'relative_url': True}), urltype + '_url')
        self.assertTrue(validate_url(survey_url))
        url = "survey/%s/%s" % (urltxt, slug(self.survey1))
        full_url = urljoin(base_url, url)
        self.assertEqual(full_url, survey_url)
        self.assertEqual('/' + url, survey_url_relative)
        if urltype == 'public':
            url_html = '<a href="%s">Click here to start survey</a>'
            self.assertEqual(url_html % full_url, getattr(self.survey1, urltype + '_url_html'), msg="Public URL is incorrect")
            self.assertEqual(url_html % ('/' + url), getattr(self.survey1.with_context({'relative_url': True}), urltype + '_url_html'), msg="Public URL is incorrect.")
def test_09_answer_survey(self):
    """Answering a survey as a portal user and as a public user must record
    free-text answer lines with the expected values."""
    question = self.env['survey.question'].sudo(self.survey_manager).create({'page_id': self.page1.id, 'question': 'Q0'})
    input_portal = self.env['survey.user_input'].sudo(self.survey_user).create({
        'survey_id': self.survey1.id,
        'partner_id': self.survey_user.partner_id.id,
        'user_input_line_ids': [(0, 0, {
            'skipped': False, 'answer_type': 'free_text', 'value_free_text': "Test Answer",
            'survey_id': self.survey1.id, 'question_id': question.id})]})
    input_public = self.env['survey.user_input'].sudo(self.user_public).create({
        'survey_id': self.survey1.id,
        'partner_id': self.survey_user.partner_id.id,
        'user_input_line_ids': [(0, 0, {
            'skipped': False, 'answer_type': 'free_text', 'value_free_text': "Test Answer",
            'survey_id': self.survey1.id, 'question_id': question.id})]})
    answers = [input_portal.user_input_line_ids[0], input_public.user_input_line_ids[0]]
    expected_values = {'answer_type': 'free_text', 'value_free_text': "Test Answer"}
    for answer in answers:
        # .items() instead of the Python-2-only .iteritems() for Python 3
        # compatibility; identical behaviour on Python 2.
        for field, value in expected_values.items():
            self.assertEqual(getattr(answer, field), value, msg="Unable to answer the survey. Expected behaviour of %s is not proper." % (field))
def test_10_survey_result_simple_multiple_choice(self):
    """prepare_result() statistics for a simple-choice question must match
    the per-label answer counts computed from the raw answer lines."""
    question = self.env['survey.question'].sudo(self.survey_manager).create({
        'page_id': self.page1.id, 'question': 'Q0', 'type': 'simple_choice',
        'labels_ids': [(0, 0, {'value': "Choice0", 'quizz_mark': 0}), (0, 0, {'value': "Choice1", 'quizz_mark': 0})]})
    # Record three random answers from the public user.
    for i in range(3):
        self.env['survey.user_input'].sudo(self.user_public).create({'survey_id': self.survey1.id, 'user_input_line_ids': [(0, 0, {
            'question_id': question.id,
            'answer_type': 'suggestion',
            'value_suggested': random.choice(question.labels_ids.ids)})]})
    lines = [line.value_suggested.id for line in question.user_input_line_ids]
    answers = [{'text': label.value, 'count': lines.count(label.id), 'answer_id': label.id} for label in question.labels_ids]
    prp_result = self.env['survey.survey'].prepare_result(question)['answers']
    # Sort both lists with an explicit key: sorting a bare list of dicts
    # relies on Python 2's arbitrary dict ordering and raises TypeError on
    # Python 3.  answer_id is unique, so this gives a deterministic order.
    answers.sort(key=lambda a: a['answer_id'])
    prp_result.sort(key=lambda a: a['answer_id'])
    self.assertEqual(prp_result, answers, msg="Statistics of simple, multiple choice questions are different from expectation")
def test_11_survey_result_matrix(self):
    """prepare_result() statistics for a simple matrix question must equal
    the frequency of every (row, column) cell among the answer lines."""
    question = self.env['survey.question'].sudo(self.survey_manager).create({
        'page_id': self.page1.id, 'question': 'Q0', 'type': 'matrix', 'matrix_subtype': 'simple',
        'labels_ids': [(0, 0, {'value': "Column0", "quizz_mark": 0}), (0, 0, {'value': "Column1", "quizz_mark": 0})],
        'labels_ids_2': [(0, 0, {'value': "Row0", "quizz_mark": 0}), (0, 0, {'value': "Row1", "quizz_mark": 0})]})
    # Record three random (row, column) answers from the public user.
    for dummy in range(3):
        self.env['survey.user_input'].sudo(self.user_public).create({'survey_id': self.survey1.id, 'user_input_line_ids': [(0, 0, {
            'question_id': question.id, 'answer_type': 'suggestion', 'value_suggested': random.choice(question.labels_ids.ids),
            'value_suggested_row': random.choice(question.labels_ids_2.ids)})]})
    # One (row, column) pair per recorded answer line.
    picked = [(line.value_suggested_row.id, line.value_suggested.id) for line in question.user_input_line_ids]
    # Expected frequency of every possible (row, column) cell.
    expected = {cell: picked.count(cell)
                for cell in product(question.labels_ids_2.ids, question.labels_ids.ids)}
    self.assertEqual(self.env['survey.survey'].prepare_result(question)['result'], expected, msg="Statistics of matrix type questions are different from expectations")
def test_12_survey_result_numeric_box(self):
    """prepare_result() statistics (average/max/min/sum/most_common) for a
    numerical-box question must match values computed from the raw data."""
    question = self.env['survey.question'].sudo(self.survey_manager).create({'page_id': self.page1.id, 'question': 'Q0', 'type': 'numerical_box'})
    # Materialise the map() result: on Python 3 map() returns a one-shot
    # iterator, which would break the num[i]/len/max/min uses below.
    num = list(map(float, random.sample(range(1, 100), 3)))
    nsum = sum(num)
    for i in range(3):
        self.env['survey.user_input'].sudo(self.user_public).create({'survey_id': self.survey1.id, 'user_input_line_ids': [(0, 0, {
            'question_id': question.id, 'answer_type': 'number', 'value_number': num[i]})]})
    exresult = {
        'average': round((nsum / len(num)), 2), 'max': round(max(num), 2),
        'min': round(min(num), 2), 'sum': nsum, 'most_common': Counter(num).most_common(5)}
    result = self.env['survey.survey'].prepare_result(question)
    # Iterating the dict directly is equivalent to .keys() and portable.
    for key in exresult:
        self.assertEqual(result[key], exresult[key], msg="Statistics of numeric box type questions are different from expectations")
def test_13_survey_actions(self):
    """Each survey window action (start/print/result/test) must return the
    matching relative URL, including any survey token suffix."""
    self.env['survey.question'].sudo(self.survey_manager).create({'page_id': self.page1.id, 'question': 'Q0', 'type': 'numerical_box'})
    actions = {
        'start': {'method': 'public', 'token': '/test', 'text': 'Start'},
        'print': {'method': 'print', 'token': '/test', 'text': 'Print'},
        'result': {'method': 'result', 'token': '', 'text': 'Results of the'},
        'test': {'method': 'public', 'token': '/phantom', 'text': 'Results of the'}}
    # .items() instead of the Python-2-only .iteritems() for Python 3
    # compatibility; identical behaviour on Python 2.
    for action, val in actions.items():
        result = getattr(self.survey1.with_context({'survey_token': val['token'][1:]}), 'action_' + action + '_survey')()
        url = getattr(self.survey1.with_context({'relative_url': True}), val['method'] + '_url') + val['token']
        self.assertEqual(result['url'], url)
| agpl-3.0 |
WillGuan105/django | django/core/mail/backends/console.py | 696 | 1477 | """
Email backend that writes messages to console instead of sending them.
"""
import sys
import threading
from django.core.mail.backends.base import BaseEmailBackend
from django.utils import six
class EmailBackend(BaseEmailBackend):
    """Email backend that dumps outgoing messages to a stream (stdout by
    default) instead of actually sending them."""

    def __init__(self, *args, **kwargs):
        # A custom stream may be injected (e.g. by tests) via kwargs.
        self.stream = kwargs.pop('stream', sys.stdout)
        self._lock = threading.RLock()
        super(EmailBackend, self).__init__(*args, **kwargs)

    def write_message(self, message):
        """Write a single message to the stream, followed by a separator."""
        msg = message.message()
        payload = msg.as_bytes()
        if six.PY3:
            # Decode with the message's charset so a text stream accepts it.
            charset = msg.get_charset()
            encoding = charset.get_output_charset() if charset else 'utf-8'
            payload = payload.decode(encoding)
        self.stream.write('%s\n' % payload)
        self.stream.write('-' * 79)
        self.stream.write('\n')

    def send_messages(self, email_messages):
        """Write all messages to the stream in a thread-safe way."""
        if not email_messages:
            return
        sent = 0
        with self._lock:
            try:
                stream_created = self.open()
                for message in email_messages:
                    self.write_message(message)
                    self.stream.flush()  # flush after each message
                    sent += 1
                if stream_created:
                    self.close()
            except Exception:
                if not self.fail_silently:
                    raise
        return sent
| bsd-3-clause |
g19-hs/personfinder | app/pytz/zoneinfo/Atlantic/Stanley.py | 9 | 5454 | '''tzinfo timezone information for Atlantic/Stanley.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Stanley(DstTzInfo):
'''Atlantic/Stanley timezone definition. See datetime.tzinfo for details'''
# NOTE: this module is machine-generated from the IANA tz database by the
# pytz zoneinfo compiler; do not edit the tables by hand.
zone = 'Atlantic/Stanley'
# UTC instants at which the zone's offset/abbreviation changes.  Each entry
# pairs positionally with the same index in _transition_info below.
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1912,3,12,3,51,24),
d(1937,9,26,4,0,0),
d(1938,3,20,3,0,0),
d(1938,9,25,4,0,0),
d(1939,3,19,3,0,0),
d(1939,10,1,4,0,0),
d(1940,3,24,3,0,0),
d(1940,9,29,4,0,0),
d(1941,3,23,3,0,0),
d(1941,9,28,4,0,0),
d(1942,3,22,3,0,0),
d(1942,9,27,4,0,0),
d(1943,1,1,3,0,0),
d(1983,5,1,4,0,0),
d(1983,9,25,3,0,0),
d(1984,4,29,2,0,0),
d(1984,9,16,3,0,0),
d(1985,4,28,2,0,0),
d(1985,9,15,3,0,0),
d(1986,4,20,3,0,0),
d(1986,9,14,4,0,0),
d(1987,4,19,3,0,0),
d(1987,9,13,4,0,0),
d(1988,4,17,3,0,0),
d(1988,9,11,4,0,0),
d(1989,4,16,3,0,0),
d(1989,9,10,4,0,0),
d(1990,4,22,3,0,0),
d(1990,9,9,4,0,0),
d(1991,4,21,3,0,0),
d(1991,9,15,4,0,0),
d(1992,4,19,3,0,0),
d(1992,9,13,4,0,0),
d(1993,4,18,3,0,0),
d(1993,9,12,4,0,0),
d(1994,4,17,3,0,0),
d(1994,9,11,4,0,0),
d(1995,4,16,3,0,0),
d(1995,9,10,4,0,0),
d(1996,4,21,3,0,0),
d(1996,9,15,4,0,0),
d(1997,4,20,3,0,0),
d(1997,9,14,4,0,0),
d(1998,4,19,3,0,0),
d(1998,9,13,4,0,0),
d(1999,4,18,3,0,0),
d(1999,9,12,4,0,0),
d(2000,4,16,3,0,0),
d(2000,9,10,4,0,0),
d(2001,4,15,5,0,0),
d(2001,9,2,6,0,0),
d(2002,4,21,5,0,0),
d(2002,9,1,6,0,0),
d(2003,4,20,5,0,0),
d(2003,9,7,6,0,0),
d(2004,4,18,5,0,0),
d(2004,9,5,6,0,0),
d(2005,4,17,5,0,0),
d(2005,9,4,6,0,0),
d(2006,4,16,5,0,0),
d(2006,9,3,6,0,0),
d(2007,4,15,5,0,0),
d(2007,9,2,6,0,0),
d(2008,4,20,5,0,0),
d(2008,9,7,6,0,0),
d(2009,4,19,5,0,0),
d(2009,9,6,6,0,0),
d(2010,4,18,5,0,0),
d(2010,9,5,6,0,0),
d(2011,4,17,5,0,0),
d(2011,9,4,6,0,0),
d(2012,4,15,5,0,0),
d(2012,9,2,6,0,0),
d(2013,4,21,5,0,0),
d(2013,9,1,6,0,0),
d(2014,4,20,5,0,0),
d(2014,9,7,6,0,0),
d(2015,4,19,5,0,0),
d(2015,9,6,6,0,0),
d(2016,4,17,5,0,0),
d(2016,9,4,6,0,0),
d(2017,4,16,5,0,0),
d(2017,9,3,6,0,0),
d(2018,4,15,5,0,0),
d(2018,9,2,6,0,0),
d(2019,4,21,5,0,0),
d(2019,9,1,6,0,0),
d(2020,4,19,5,0,0),
d(2020,9,6,6,0,0),
d(2021,4,18,5,0,0),
d(2021,9,5,6,0,0),
d(2022,4,17,5,0,0),
d(2022,9,4,6,0,0),
d(2023,4,16,5,0,0),
d(2023,9,3,6,0,0),
d(2024,4,21,5,0,0),
d(2024,9,1,6,0,0),
d(2025,4,20,5,0,0),
d(2025,9,7,6,0,0),
d(2026,4,19,5,0,0),
d(2026,9,6,6,0,0),
d(2027,4,18,5,0,0),
d(2027,9,5,6,0,0),
d(2028,4,16,5,0,0),
d(2028,9,3,6,0,0),
d(2029,4,15,5,0,0),
d(2029,9,2,6,0,0),
d(2030,4,21,5,0,0),
d(2030,9,1,6,0,0),
d(2031,4,20,5,0,0),
d(2031,9,7,6,0,0),
d(2032,4,18,5,0,0),
d(2032,9,5,6,0,0),
d(2033,4,17,5,0,0),
d(2033,9,4,6,0,0),
d(2034,4,16,5,0,0),
d(2034,9,3,6,0,0),
d(2035,4,15,5,0,0),
d(2035,9,2,6,0,0),
d(2036,4,20,5,0,0),
d(2036,9,7,6,0,0),
d(2037,4,19,5,0,0),
d(2037,9,6,6,0,0),
]
# (utcoffset seconds, dst offset seconds, tzname) in effect from the
# corresponding transition time above until the next one.
_transition_info = [
i(-13860,0,'SMT'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,0,'FKT'),
i(-7200,3600,'FKST'),
i(-10800,0,'FKT'),
i(-7200,3600,'FKST'),
i(-10800,0,'FKT'),
i(-10800,0,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
i(-14400,0,'FKT'),
i(-10800,3600,'FKST'),
]
# Module-level singleton, as expected by pytz's zone lookup machinery.
Stanley = Stanley()
| apache-2.0 |
eemirtekin/edx-platform | lms/envs/devplus.py | 102 | 2123 | """
This config file tries to mimic the production environment more closely than the
normal dev.py. It assumes you're running a local instance of MySQL 5.1 and that
you're running memcached. You'll want to use this to test caching and database
migrations.
Assumptions:
* MySQL 5.1 (version important? (askbot breaks on 5.5, but that's gone now))
Dir structure:
/envroot/
/edx-platform # The location of this repo
/log # Where we're going to write log files
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
from .dev import *
WIKI_ENABLED = True
# Local MySQL instance standing in for the production database.
# NOTE(review): credentials are dev-only (root, empty password) — never
# reuse this file outside a local sandbox.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'wwc',
'USER': 'root',
'PASSWORD': '',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
# Memcached-backed caches, mirroring the production cache topology.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
'KEY_FUNCTION': 'util.memcache.safe_key',
},
'general': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
'KEY_PREFIX': 'general',
'VERSION': 5,
'KEY_FUNCTION': 'util.memcache.safe_key',
}
}
# Store sessions in the cache (requires the memcached instance above).
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
################################ DEBUG TOOLBAR #################################
INSTALLED_APPS += ('debug_toolbar',)
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
# The toolbar is only shown to requests originating from these IPs.
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.profiling.ProfilingPanel',
)
#PIPELINE = True
| agpl-3.0 |
VenturaDelMonte/staticwebanalyzer | SDK/sip-4.16.3/configure.py | 2 | 29998 | # This script handles the SIP configuration and generates the Makefiles.
#
# Copyright (c) 2014 Riverbank Computing Limited <info@riverbankcomputing.com>
#
# This file is part of SIP.
#
# This copy of SIP is licensed for use under the terms of the SIP License
# Agreement. See the file LICENSE for more details.
#
# This copy of SIP may also used under the terms of the GNU General Public
# License v2 or v3 as published by the Free Software Foundation which can be
# found in the files LICENSE-GPL2 and LICENSE-GPL3 included in this package.
#
# SIP is supplied WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
import sys
import os
import glob
import optparse
from distutils import sysconfig
try:
from importlib import invalidate_caches
except ImportError:
invalidate_caches = lambda: None
import siputils
# Initialise the globals.
sip_version = 0x041003
sip_version_str = "4.16.3"
# Version/platform of the Python interpreter running this script.
py_version = sys.hexversion >> 8
py_platform = sys.platform
# Target-platform directories; filled in by set_platform_directories() and
# possibly overridden by update_from_configuration_file().
plat_py_site_dir = None
plat_py_inc_dir = None
plat_py_conf_inc_dir = None
plat_py_lib_dir = None
plat_sip_dir = None
plat_bin_dir = None
platform_specs = []
# Installation directories for the sip binary, header, module and .sip files.
sip_bin_dir = ''
sip_inc_dir = ''
sip_module_dir = ''
sip_sip_dir = ''
sysroot = ''
src_dir = os.path.dirname(os.path.abspath(__file__))
sip_module_base = None
build_platform = None
# Constants.
DEFAULT_MACOSX_ARCH = 'i386 ppc'
MACOSX_SDK_DIRS = ('/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs', '/Developer/SDKs')
# The names of build macros extracted from the platform specific configuration
# files.
build_macro_names = [
"DEFINES", "CONFIG",
"CC",
"CFLAGS",
"CFLAGS_RELEASE", "CFLAGS_DEBUG",
"CFLAGS_CONSOLE", "CFLAGS_SHLIB", "CFLAGS_APP", "CFLAGS_THREAD",
"CFLAGS_MT", "CFLAGS_MT_DBG", "CFLAGS_MT_DLL", "CFLAGS_MT_DLLDBG",
"CFLAGS_EXCEPTIONS_ON", "CFLAGS_EXCEPTIONS_OFF",
"CFLAGS_RTTI_ON", "CFLAGS_RTTI_OFF",
"CFLAGS_STL_ON", "CFLAGS_STL_OFF",
"CFLAGS_WARN_ON", "CFLAGS_WARN_OFF",
"CHK_DIR_EXISTS", "COPY",
"CXX",
"CXXFLAGS",
"CXXFLAGS_RELEASE", "CXXFLAGS_DEBUG",
"CXXFLAGS_CONSOLE", "CXXFLAGS_SHLIB", "CXXFLAGS_APP", "CXXFLAGS_THREAD",
"CXXFLAGS_MT", "CXXFLAGS_MT_DBG", "CXXFLAGS_MT_DLL", "CXXFLAGS_MT_DLLDBG",
"CXXFLAGS_EXCEPTIONS_ON", "CXXFLAGS_EXCEPTIONS_OFF",
"CXXFLAGS_RTTI_ON", "CXXFLAGS_RTTI_OFF",
"CXXFLAGS_STL_ON", "CXXFLAGS_STL_OFF",
"CXXFLAGS_WARN_ON", "CXXFLAGS_WARN_OFF",
"DEL_FILE",
"EXTENSION_SHLIB", "EXTENSION_PLUGIN",
"INCDIR", "INCDIR_X11", "INCDIR_OPENGL",
"LIBS_CORE", "LIBS_GUI", "LIBS_NETWORK", "LIBS_OPENGL", "LIBS_WEBKIT",
"LINK", "LINK_SHLIB", "AIX_SHLIB", "LINK_SHLIB_CMD",
"LFLAGS", "LFLAGS_CONSOLE", "LFLAGS_CONSOLE_DLL", "LFLAGS_DEBUG",
"LFLAGS_DLL",
"LFLAGS_PLUGIN", "LFLAGS_RELEASE", "LFLAGS_SHLIB", "LFLAGS_SONAME",
"LFLAGS_THREAD", "LFLAGS_WINDOWS", "LFLAGS_WINDOWS_DLL", "LFLAGS_OPENGL",
"LIBDIR", "LIBDIR_X11", "LIBDIR_OPENGL",
"LIBS", "LIBS_CONSOLE", "LIBS_RT",
"LIBS_RTMT", "LIBS_THREAD", "LIBS_WINDOWS", "LIBS_X11",
"MAKEFILE_GENERATOR",
"MKDIR",
"RPATH", "LFLAGS_RPATH",
"AR", "RANLIB", "LIB", "STRIP"
]
def show_platforms():
    """Print the sorted list of supported platform/compiler specifications
    on stdout."""
    sys.stdout.write("""
The following platform/compiler configurations are supported:
""")
    # Sort in place so later users of the module-level list also see the
    # sorted order.
    platform_specs.sort()
    formatted = siputils.format(", ".join(platform_specs), leftmargin=2)
    sys.stdout.write(formatted + "\n\n")
def show_macros():
    """Print the sorted list of adjustable build macros on stdout."""
    sys.stdout.write("""
The following options may be used to adjust the compiler configuration:
""")
    # Sort in place so later users of the module-level list also see the
    # sorted order.
    build_macro_names.sort()
    formatted = siputils.format(", ".join(build_macro_names), leftmargin=2)
    sys.stdout.write(formatted + "\n\n")
def set_build_platform():
    """ Initialise the build platform. """
    global build_platform

    # Default specification for each Unix-like platform, keyed by the
    # leading part of sys.platform.
    platdefaults = {
        "aix": "aix-xlc",
        "bsd": "bsdi-g++",
        "cygwin": "cygwin-g++",
        "darwin": "macx-g++",
        "dgux": "dgux-g++",
        "freebsd": "freebsd-g++",
        "gnu": "hurd-g++",
        "hp-ux": "hpux-acc",
        "irix": "irix-cc",
        "linux": "linux-g++",
        "lynxos": "lynxos-g++",
        "netbsd": "netbsd-g++",
        "openbsd": "openbsd-g++",
        "openunix": "unixware-cc",
        "osf1": "tru64-cxx",
        "qnx": "qnx-g++",
        "reliantunix": "reliant-cds",
        "sco_sv": "sco-cc",
        "sinix": "reliant-cds",
        "sunos5": "solaris-cc",
        "ultrix": "ultrix-g++",
        "unix_sv": "unixware-g++",
        "unixware": "unixware-cc"
    }

    build_platform = "none"

    if py_platform == "win32":
        # Pick the MSVC variant matching the compiler used for this Python.
        if py_version >= 0x030300:
            build_platform = "win32-msvc2010"
        elif py_version >= 0x020600:
            build_platform = "win32-msvc2008"
        elif py_version >= 0x020400:
            build_platform = "win32-msvc.net"
        else:
            build_platform = "win32-msvc"
    else:
        # Match on the platform-name prefix (e.g. "linux2" -> "linux").
        for prefix, spec in platdefaults.items():
            if py_platform.startswith(prefix):
                build_platform = spec
                break
def inform_user():
""" Tell the user the option values that are going to be used. """
# Each message below is emitted in this fixed order; the exact wording is
# user-facing and must not change without updating the documentation.
if not opts.no_tools:
siputils.inform("The SIP code generator will be installed in %s." % sip_bin_dir)
siputils.inform("The %s module will be installed in %s." % (sip_module_base, sip_module_dir))
if opts.static:
siputils.inform("The %s module will be built as a static library." % sip_module_base)
siputils.inform("The sip.h header file will be installed in %s." % sip_inc_dir)
siputils.inform("The default directory to install .sip files in is %s." % sip_sip_dir)
# qmake builds take their platform configuration from qmake itself, so the
# spec is only reported for the builtin build system.
if opts.use_qmake is None:
siputils.inform("The platform/compiler configuration is %s." % build_platform)
# MacOS/X specific options are only reported when explicitly set.
if opts.arch:
siputils.inform("MacOS/X binaries will be created for %s." % (", ".join(opts.arch.split())))
if opts.universal:
siputils.inform("MacOS/X universal binaries will be created using %s." % opts.universal)
if opts.deployment_target:
siputils.inform("MacOS/X deployment target is %s." % opts.deployment_target)
def set_platform_directories():
""" Initialise the global variables relating to platform-specific
directories.
"""
global plat_py_site_dir, plat_py_inc_dir, plat_py_conf_inc_dir
global plat_bin_dir, plat_py_lib_dir, plat_sip_dir
# We trust distutils for some stuff.
plat_py_site_dir = sysconfig.get_python_lib(plat_specific=1)
plat_py_inc_dir = sysconfig.get_python_inc()
# pyconfig.h lives next to the generated config header, which may differ
# from the Python.h directory (e.g. on multi-arch installations).
plat_py_conf_inc_dir = os.path.dirname(sysconfig.get_config_h_filename())
if sys.platform == "win32":
# Windows layout: import libraries under <prefix>\libs.
plat_py_lib_dir = sys.prefix + "\\libs"
plat_bin_dir = sys.exec_prefix
plat_sip_dir = sys.prefix + "\\sip"
else:
# Unix layout: the static/config library lives in <stdlib>/config.
lib_dir = sysconfig.get_python_lib(plat_specific=1, standard_lib=1)
plat_py_lib_dir = lib_dir + "/config"
plat_bin_dir = sys.exec_prefix + "/bin"
plat_sip_dir = sys.prefix + "/share/sip"
def patch_files():
    """Generate the siplib source files from their .in templates, substituting
    the configured sip module name into each one."""
    patched = (
        ("siplib", "sip.h"),
        ("siplib", "siplib.c"),
        ("siplib", "siplib.sbf")
    )

    # The siplib directory may not exist if we are building away from the
    # source directory.
    try:
        os.mkdir("siplib")
    except OSError:
        pass

    for f in patched:
        dst_fn = os.path.join(*f)
        src_fn = os.path.join(src_dir, dst_fn + ".in")

        siputils.inform("Creating %s..." % dst_fn)

        # Context managers ensure both files are closed even if a
        # read/write fails part way through (the original leaked the
        # handles on error).
        with open(src_fn) as src, open(dst_fn, "w") as dst:
            for line in src:
                line = line.replace("@CFG_MODULE_NAME@", opts.sip_module)
                line = line.replace("@CFG_MODULE_BASENAME@", sip_module_base)
                dst.write(line)
def create_config(module, template, macros):
"""Create the SIP configuration module so that it can be imported by build
scripts.
module is the module file name.
template is the template file name.
macros is the dictionary of build macros.
"""
siputils.inform("Creating %s..." % module)
# Values substituted into the sipconfig template; these become the
# attributes of the generated Configuration class.
content = {
"sip_config_args": sys.argv[1:],
"sip_version": sip_version,
"sip_version_str": sip_version_str,
"platform": build_platform,
"sip_bin": os.path.join(sip_bin_dir, "sip"),
"sip_inc_dir": sip_inc_dir,
"sip_mod_dir": sip_module_dir,
"default_bin_dir": plat_bin_dir,
"default_mod_dir": plat_py_site_dir,
"default_sip_dir": sip_sip_dir,
"py_version": py_version,
"py_inc_dir": plat_py_inc_dir,
"py_conf_inc_dir": plat_py_conf_inc_dir,
"py_lib_dir": plat_py_lib_dir,
"universal": opts.universal,
"arch": opts.arch,
"deployment_target": opts.deployment_target,
# SIP itself never links against a Qt framework build.
"qt_framework": 0
}
siputils.create_config_module(module, template, content, macros)
def create_makefiles(macros):
"""Create the Makefiles.
macros is the dictionary of platform specific build macros.
"""
# Bootstrap. Make sure we get the right one.
sys.path.insert(0, os.path.curdir)
invalidate_caches()
import sipconfig
cfg = sipconfig.Configuration()
cfg.set_build_macros(macros)
# With --no-tools only the sip module is built; otherwise the code
# generator is built too and the build-system files are installed.
if opts.no_tools:
subdirs = ["siplib"]
installs = None
else:
subdirs = ["sipgen", "siplib"]
installs = (["sipconfig.py", os.path.join(src_dir, "sipdistutils.py")],
cfg.sip_mod_dir)
# Either generate qmake .pro files or builtin Makefiles, depending on
# the --use-qmake option.
if opts.use_qmake:
sipconfig.inform("Creating top level .pro file...")
pro = open("sip.pro", "w")
pro.write("TEMPLATE = subdirs\n")
pro.write("SUBDIRS = %s\n" % " ".join(subdirs))
if installs is not None:
files, path = installs
pro.write("\n")
pro.write("build_system.files = %s\n" % " ".join(files))
pro.write("build_system.path = %s\n" % quote(path))
pro.write("INSTALLS += build_system\n")
pro.close()
else:
sipconfig.inform("Creating top level Makefile...")
sipconfig.ParentMakefile(
configuration=cfg,
subdirs=subdirs,
installs=installs
).generate()
if opts.use_qmake:
sipconfig.inform("Creating sip code generator .pro file...")
pro = open(os.path.join("sipgen", "sipgen.pro"), "w")
pro.write("TEMPLATE = app\n")
pro.write("TARGET = sip\n")
pro.write("CONFIG -= qt app_bundle\n")
pro.write("CONFIG += warn_off exceptions_off console %s\n" % (
("debug" if opts.debug else "release")))
pro.write("\n")
pro.write("# Work around QTBUG-39300.\n")
pro.write("CONFIG -= android_install\n")
pro.write("\n")
pro.write("target.path = %s\n" % os.path.dirname(cfg.sip_bin))
pro.write("INSTALLS += target\n")
c_sources = get_sources("sipgen", "*.c")
pro.write("\n")
pro.write("SOURCES = %s\n" % " ".join(c_sources))
headers = get_sources("sipgen", "*.h")
pro.write("\n")
pro.write("HEADERS = %s\n" % " ".join(headers))
pro.close()
else:
sipconfig.inform("Creating sip code generator Makefile...")
sipconfig.ProgramMakefile(
configuration=cfg,
build_file=os.path.join(src_dir, "sipgen", "sipgen.sbf"),
dir="sipgen",
install_dir=os.path.dirname(cfg.sip_bin),
console=1,
warnings=0,
universal=opts.universal,
arch=opts.arch,
deployment_target=opts.deployment_target
).generate()
if opts.use_qmake:
sipconfig.inform("Creating sip module .pro file...")
pro = open(os.path.join("siplib", "siplib.pro"), "w")
pro.write("TEMPLATE = lib\n")
pro.write("TARGET = %s\n" % sip_module_base)
pro.write("CONFIG -= qt\n")
pro.write("CONFIG += warn_on exceptions_off %s %s\n" % (
("staticlib" if opts.static else "plugin"),
("debug" if opts.debug else "release")))
pro.write("\n")
pro.write("# Work around QTBUG-39300.\n")
pro.write("CONFIG -= android_install\n")
pro.write("\n")
pro.write("INCLUDEPATH += %s\n" % cfg.py_inc_dir)
if cfg.py_conf_inc_dir != cfg.py_inc_dir:
pro.write("INCLUDEPATH += %s\n" % cfg.py_conf_inc_dir)
if not opts.static:
# These only need to be correct for Windows.
debug_suffix = "_d" if opts.debug else ""
link_lib_dir = quote("-L" + cfg.py_lib_dir)
pro.write("""
win32 {
PY_MODULE = %s%s.pyd
target.files = %s%s.pyd
LIBS += %s -lpython%d.%d
QMAKE_POST_LINK = $(COPY_FILE) $(DESTDIR_TARGET) $$PY_MODULE
} else {
PY_MODULE = %s.so
target.files = %s.so
QMAKE_POST_LINK = $(COPY_FILE) $(TARGET) $$PY_MODULE
}
macx {
QMAKE_LFLAGS += "-undefined dynamic_lookup"
QMAKE_LFLAGS += "-install_name $$absolute_path($$PY_MODULE, $$target.path)"
}
""" % (sip_module_base, debug_suffix,
sip_module_base, debug_suffix,
link_lib_dir, (py_version >> 16), ((py_version >> 8) & 0xff),
sip_module_base,
sip_module_base))
pro.write("\n")
pro.write("target.CONFIG = no_check_exist\n")
pro.write("target.path = %s\n" % cfg.sip_mod_dir)
pro.write("INSTALLS += target\n")
pro.write("\n")
pro.write("sip_h.files = sip.h\n")
pro.write("sip_h.path = %s\n" % cfg.sip_inc_dir)
pro.write("INSTALLS += sip_h\n")
c_sources = get_sources("siplib", "*.c")
cpp_sources = get_sources("siplib", "*.cpp")
pro.write("\n")
pro.write("SOURCES = %s\n" % " ".join(c_sources + cpp_sources))
headers = get_sources("siplib", "*.h")
pro.write("\n")
pro.write("HEADERS = %s\n" % " ".join(headers))
pro.close()
else:
sipconfig.inform("Creating sip module Makefile...")
sipconfig.ModuleMakefile(
configuration=cfg,
build_file=os.path.join(src_dir, "siplib", "siplib.sbf"),
dir="siplib",
install_dir=cfg.sip_mod_dir,
installs=([os.path.join(src_dir, "siplib", "sip.h")], cfg.sip_inc_dir),
console=1,
warnings=0,
static=opts.static,
debug=opts.debug,
universal=opts.universal,
arch=opts.arch,
deployment_target=opts.deployment_target
).generate()
def get_sources(sources_dir, ext):
    """ Get the quoted files with the specified extension from a directory. """
    pattern = os.path.join(src_dir, sources_dir, ext)
    return [quote(fn) for fn in glob.glob(pattern)]
def quote(path):
    """ Return a path that is quoted if necessary. """
    # Only paths containing spaces need double quotes (for Makefiles and
    # .pro files).
    return '"%s"' % path if " " in path else path
# Look out for recursive definitions.
# Stack of names currently being extrapolated, used to detect cycles such
# as a = %(b) with b = %(a).
_extrapolating = []
def _get_configuration_value(config, name, default=None):
""" Get a configuration value while extrapolating. """
# Extrapolation replaces every %(other_name) occurrence in the value with
# that name's (recursively extrapolated) value.
value = config.get(name)
if value is None:
# A missing name is fatal unless the caller supplied a fallback.
if default is None:
siputils.error("Configuration file references non-existent name '%s'." % name)
return default
parts = value.split('%(', 1)
while len(parts) == 2:
prefix, tail = parts
# tail starts just after '%('; the closing ')' must follow.
parts = tail.split(')', 1)
if len(parts) != 2:
siputils.error("Configuration file contains unterminated extrapolated name '%s'." % tail)
xtra_name, suffix = parts
if xtra_name in _extrapolating:
siputils.error("Configuration file contains a recursive reference to '%s'." % xtra_name)
# Resolve the referenced name recursively, guarding against cycles.
_extrapolating.append(xtra_name)
xtra_value = _get_configuration_value(config, xtra_name)
_extrapolating.pop()
value = prefix + xtra_value + suffix
# Re-scan: the substituted value may itself contain '%(' references.
parts = value.split('%(', 1)
return value
def update_from_configuration_file(config_file):
    """ Update a number of globals from values read from a configuration file.

    config_file is the name of the 'name = value' style configuration file.
    Lines may contain '#' comments; values may reference other names with
    the %(name) extrapolation syntax.
    """
    siputils.inform("Reading configuration from %s..." % config_file)

    config = {}

    # Read the file into the dict.  A context manager guarantees the file
    # is closed even if siputils.error() raises part way through (the
    # original leaked the handle in that case and kept an unused
    # 'last_name' variable).
    line_nr = 0
    with open(config_file) as cfg:
        for l in cfg:
            line_nr += 1

            # Strip comments and blank lines.
            l = l.split('#')[0].strip()
            if l == '':
                continue

            parts = l.split('=', 1)
            if len(parts) == 2:
                name = parts[0].strip()
                value = parts[1].strip()
            else:
                name = value = ''

            if name == '' or value == '':
                siputils.error("%s:%d: Invalid line." % (config_file, line_nr))

            config[name] = value

    # Enforce the presets.
    version = siputils.version_to_string(py_version).split('.')
    config['py_major'] = version[0]
    config['py_minor'] = version[1]
    config['sysroot'] = sysroot

    # Override the relevant values.
    global py_platform, plat_py_conf_inc_dir, plat_py_inc_dir, plat_py_lib_dir
    global sip_bin_dir, sip_inc_dir, sip_module_dir, sip_sip_dir

    py_platform = _get_configuration_value(config, 'py_platform', py_platform)
    plat_py_inc_dir = _get_configuration_value(config, 'py_inc_dir',
            plat_py_inc_dir)
    plat_py_lib_dir = _get_configuration_value(config, 'py_pylib_dir',
            plat_py_lib_dir)

    # The pyconfig.h directory defaults to the Python.h directory.
    plat_py_conf_inc_dir = _get_configuration_value(config, 'py_conf_inc_dir',
            plat_py_inc_dir)

    sip_bin_dir = _get_configuration_value(config, 'sip_bin_dir', sip_bin_dir)
    sip_module_dir = _get_configuration_value(config, 'sip_module_dir',
            sip_module_dir)

    # Note that this defaults to any 'py_inc_dir' specified in the
    # configuration file.
    sip_inc_dir = _get_configuration_value(config, 'sip_inc_dir',
            plat_py_inc_dir)

    # Note that this is only used when creating sipconfig.py.
    sip_sip_dir = _get_configuration_value(config, 'sip_sip_dir', sip_sip_dir)
def create_optparser(sdk_dir):
    """Create and return the optparse.OptionParser for the command line.

    sdk_dir is the directory expected to contain the MacOS SDKs; it is only
    consulted (to compute the default for --sdk) when running on darwin.
    """
    # The store_* callbacks normalise option values before storing them on
    # parser.values; they raise OptionValueError for invalid input.

    def store_abspath(option, opt_str, value, parser):
        setattr(parser.values, option.dest, os.path.abspath(value))

    def store_abspath_dir(option, opt_str, value, parser):
        # Like store_abspath, but the value must name an existing directory.
        if not os.path.isdir(value):
            raise optparse.OptionValueError("'%s' is not a directory" % value)
        setattr(parser.values, option.dest, os.path.abspath(value))

    def store_abspath_file(option, opt_str, value, parser):
        # Like store_abspath, but the value must name an existing file.
        if not os.path.isfile(value):
            raise optparse.OptionValueError("'%s' is not a file" % value)
        setattr(parser.values, option.dest, os.path.abspath(value))

    def store_version(option, opt_str, value, parser):
        # Parse a "major.minor" style string into an encoded version number.
        version = version_from_string(value)
        if version is None:
            raise optparse.OptionValueError(
                    "'%s' is not a valid version number" % value)
        setattr(parser.values, option.dest, version)

    p = optparse.OptionParser(usage="python %prog [opts] [macro=value] "
            "[macro+=value]", version=sip_version_str)

    # Note: we don't use %default to be compatible with Python 2.3.
    p.add_option("-k", "--static", action="store_true", default=False,
            dest="static", help="build the SIP module as a static library")
    p.add_option("-p", "--platform", action="store", type="string",
            metavar="PLATFORM", dest="platform", help="the platform/compiler "
            "configuration [default: %s]" % build_platform)
    p.add_option("-u", "--debug", action="store_true", default=False,
            help="build with debugging symbols")
    p.add_option("--sip-module", action="store", default="sip", type="string",
            metavar="NAME", dest="sip_module", help="the package.module name "
            "of the sip module [default: sip]")
    p.add_option("--configuration", dest='config_file', type='string',
            action='callback', callback=store_abspath_file, metavar="FILE",
            help="FILE contains the target configuration")
    p.add_option("--target-py-version", dest='target_py_version',
            type='string', action='callback', callback=store_version,
            metavar="VERSION",
            help="the major.minor version of the target Python [default: "
                    "%s]" % siputils.version_to_string(py_version, parts=2))
    p.add_option("--sysroot", dest='sysroot', type='string', action='callback',
            callback=store_abspath_dir, metavar="DIR",
            help="DIR is the target system root directory")
    p.add_option("--no-tools", action="store_true", default=False,
            dest="no_tools", help="disable the building of the code generator "
            "and the installation of the build system [default: enabled]")
    p.add_option("--use-qmake", action="store_true", default=False,
            dest="use_qmake", help="generate qmake .pro files instead of "
            "Makefiles")

    # MacOS-only options; the group is only added on darwin, so these
    # attributes are filled in by main() on other platforms.
    if sys.platform == 'darwin':
        # Get the latest SDK to use as the default.
        sdks = glob.glob(sdk_dir + '/MacOSX*.sdk')
        if len(sdks) > 0:
            sdks.sort()
            _, default_sdk = os.path.split(sdks[-1])
        else:
            default_sdk = 'MacOSX10.4u.sdk'

        g = optparse.OptionGroup(p, title="MacOS X Configuration")
        g.add_option("--arch", action="append", default=[], dest="arch",
                choices=["i386", "x86_64", "ppc"],
                help="build for architecture ARCH")
        g.add_option("--deployment-target", action="store", default='',
                metavar="VERSION", dest="deployment_target",
                help="set the value of the MACOSX_DEPLOYMENT_TARGET "
                "environment variable in generated Makefiles")
        g.add_option("-n", "--universal", action="store_true", default=False,
                dest="universal",
                help="build the SIP code generator and module as universal "
                "binaries")
        g.add_option("-s", "--sdk", action="store", default=default_sdk,
                type="string", metavar="SDK", dest="sdk",
                help="the name of the SDK used when building universal "
                "binaries [default: %s]" % default_sdk)
        p.add_option_group(g)

    # Querying.
    g = optparse.OptionGroup(p, title="Query")
    g.add_option("--show-platforms", action="store_true", default=False,
            dest="show_platforms", help="show the list of supported "
            "platform/compiler configurations")
    g.add_option("--show-build-macros", action="store_true", default=False,
            dest="show_build_macros", help="show the list of supported build "
            "macros")
    p.add_option_group(g)

    # Installation.
    g = optparse.OptionGroup(p, title="Installation")
    g.add_option("-b", "--bindir", action="callback", type="string",
            metavar="DIR", dest="sipbindir", callback=store_abspath,
            help="where the SIP code generator will be installed [default: "
            "%s]" % plat_bin_dir)
    g.add_option("-d", "--destdir", action="callback", type="string",
            metavar="DIR", dest="sipmoddir", callback=store_abspath,
            help="where the SIP module will be installed [default: "
            "%s]" % plat_py_site_dir)
    g.add_option("-e", "--incdir", action="callback", type="string",
            metavar="DIR", dest="sipincdir", callback=store_abspath,
            help="where the SIP header file will be installed [default: "
            "%s]" % plat_py_inc_dir)
    g.add_option("-v", "--sipdir", action="callback", type="string",
            metavar="DIR", dest="sipsipdir", callback=store_abspath,
            help="where .sip files are normally installed [default: "
            "%s]" % plat_sip_dir)
    p.add_option_group(g)

    return p
def main(argv):
    """Create the configuration module module.

    argv is the list of command line arguments.

    Parses the command line, applies overrides from any configuration file
    or --sysroot, then generates sipconfig.py and the Makefiles.  Errors are
    reported via siputils.error(), which is expected not to return normally.
    """
    siputils.inform("This is SIP %s for Python %s on %s." % (sip_version_str, sys.version.split()[0], sys.platform))

    global py_version, build_platform

    if py_version < 0x020300:
        siputils.error("This version of SIP requires Python v2.3 or later.")

    # Basic initialisation.
    set_platform_directories()
    set_build_platform()

    # Build up the list of valid specs.
    for s in os.listdir(os.path.join(src_dir, "specs")):
        platform_specs.append(s)

    # Determine the directory containing the default OS/X SDK.
    # (for/else: the else runs only if no listed directory exists.)
    if sys.platform == 'darwin':
        for sdk_dir in MACOSX_SDK_DIRS:
            if os.path.isdir(sdk_dir):
                break
        else:
            sdk_dir = MACOSX_SDK_DIRS[0]
    else:
        sdk_dir = ''

    # Parse the command line.
    global opts
    p = create_optparser(sdk_dir)
    opts, args = p.parse_args()

    # Override defaults that affect subsequent configuration.
    if opts.target_py_version is not None:
        py_version = opts.target_py_version

    if opts.sysroot is not None:
        global sysroot
        sysroot = opts.sysroot

    # Make sure MacOS specific options get initialised.  On other platforms
    # the MacOS option group is never added, so these attributes would
    # otherwise be missing.
    if sys.platform != 'darwin':
        opts.universal = ''
        opts.arch = []
        opts.sdk = ''
        opts.deployment_target = ''

    # Handle the query options.
    if opts.show_platforms or opts.show_build_macros:
        if opts.show_platforms:
            show_platforms()

        if opts.show_build_macros:
            show_macros()

        sys.exit()

    # Convert the list 'arch' option to a string.  Multiple architectures
    # imply a universal binary.
    if len(opts.arch) > 1:
        opts.universal = True

    opts.arch = ' '.join(opts.arch)

    # Convert the boolean 'universal' option to a string (the absolute path
    # of the SDK to build against, or '' when not building universal).
    if opts.universal:
        if '/' in opts.sdk:
            opts.universal = os.path.abspath(opts.sdk)
        else:
            opts.universal = sdk_dir + '/' + opts.sdk

        if not os.path.isdir(opts.universal):
            siputils.error("Unable to find the SDK directory %s. Use the --sdk flag to specify the name of the SDK or its full path." % opts.universal)

        if opts.arch == '':
            opts.arch = DEFAULT_MACOSX_ARCH
    else:
        opts.universal = ''

    # Apply the overrides from any configuration file.
    global plat_bin_dir, plat_py_conf_inc_dir, plat_py_inc_dir
    global plat_py_lib_dir, plat_py_site_dir, plat_sip_dir
    global sip_bin_dir, sip_inc_dir, sip_module_dir, sip_sip_dir

    # Set defaults.
    sip_bin_dir = plat_bin_dir
    sip_inc_dir = plat_py_inc_dir
    sip_module_dir = plat_py_site_dir
    sip_sip_dir = plat_sip_dir

    if opts.config_file is not None:
        update_from_configuration_file(opts.config_file)
    elif sysroot != '':
        # No configuration file: rewrite any path under sys.prefix so that it
        # points into the sysroot instead.
        def apply_sysroot(d):
            if d.startswith(sys.prefix):
                d = sysroot + d[len(sys.prefix):]

            return d

        plat_bin_dir = apply_sysroot(plat_bin_dir)
        plat_py_conf_inc_dir = apply_sysroot(plat_py_conf_inc_dir)
        plat_py_inc_dir = apply_sysroot(plat_py_inc_dir)
        plat_py_lib_dir = apply_sysroot(plat_py_lib_dir)
        plat_py_site_dir = apply_sysroot(plat_py_site_dir)
        plat_sip_dir = apply_sysroot(plat_sip_dir)
        sip_bin_dir = apply_sysroot(sip_bin_dir)
        sip_inc_dir = apply_sysroot(sip_inc_dir)
        sip_module_dir = apply_sysroot(sip_module_dir)
        sip_sip_dir = apply_sysroot(sip_sip_dir)

    # Override from the command line.
    if opts.platform is not None:
        build_platform = opts.platform

    if opts.sipbindir is not None:
        sip_bin_dir = opts.sipbindir

    if opts.sipincdir is not None:
        sip_inc_dir = opts.sipincdir

    if opts.sipmoddir is not None:
        sip_module_dir = opts.sipmoddir

    if opts.sipsipdir is not None:
        sip_sip_dir = opts.sipsipdir

    # Get the platform specific macros for building.
    macros = siputils.parse_build_macros(
            os.path.join(src_dir, "specs", build_platform), build_macro_names,
            args)

    if macros is None:
        siputils.error("Unsupported macro name specified. Use the --show-build-macros flag to see a list of supported macros.")
        # NOTE(review): siputils.error() presumably exits already, making
        # this sys.exit(2) a belt-and-braces fallback -- confirm.
        sys.exit(2)

    # Fix the name of the sip module.  A dotted --sip-module name means the
    # module is installed inside a package below sip_module_dir.
    global sip_module_base

    module_path = opts.sip_module.split(".")
    sip_module_base = module_path[-1]

    if len(module_path) > 1:
        del module_path[-1]
        module_path.insert(0, sip_module_dir)
        sip_module_dir = os.path.join(*module_path)

    # Tell the user what's been found.
    inform_user()

    # Patch any files that need it.
    patch_files()

    # Install the configuration module.
    create_config("sipconfig.py", os.path.join(src_dir, "siputils.py"),
            macros)

    # Create the Makefiles.
    create_makefiles(macros)
###############################################################################
# The script starts here.
###############################################################################

if __name__ == "__main__":
    try:
        main(sys.argv)
    except SystemExit:
        # Deliberate exits (e.g. from siputils.error() or --show-* queries)
        # pass through untouched.
        raise
    except:
        # Anything else is an internal error: tell the user how to report it,
        # then re-raise so the traceback is still printed.
        # (Fixed typo in the user-facing message: "occured" -> "occurred".)
        sys.stderr.write(
"""An internal error occurred. Please report all the output from the program,
including the following traceback, to support@riverbankcomputing.com.
""")
        raise
| mit |
webmasterraj/GaSiProMo | flask/lib/python2.7/site-packages/flask/debughelpers.py | 777 | 3508 | # -*- coding: utf-8 -*-
"""
flask.debughelpers
~~~~~~~~~~~~~~~~~~
Various helpers to make the development experience better.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from ._compat import implements_to_string
class UnexpectedUnicodeError(AssertionError, UnicodeError):
    """Signals that unicode or binary data showed up where it was not
    expected, so a more helpful error can be reported than the generic one.
    """
@implements_to_string
class DebugFilesKeyError(KeyError, AssertionError):
    """Raised from request.files during debugging.  The idea is that it can
    provide a better error message than just a generic KeyError/BadRequest.
    """

    def __init__(self, request, key):
        submitted = request.form.getlist(key)
        message = ('You tried to access the file "%s" in the request.files '
                   'dictionary but it does not exist. The mimetype for the request '
                   'is "%s" instead of "multipart/form-data" which means that no '
                   'file contents were transmitted. To fix this error you should '
                   'provide enctype="multipart/form-data" in your form.' %
                   (key, request.mimetype))
        if submitted:
            # The file field arrived as ordinary form data instead; name the
            # values the browser actually sent to make debugging easier.
            names = ', '.join('"%s"' % name for name in submitted)
            message += ('\n\nThe browser instead transmitted some file names. '
                        'This was submitted: %s' % names)
        self.msg = message

    def __str__(self):
        return self.msg
class FormDataRoutingRedirect(AssertionError):
    """This exception is raised by Flask in debug mode if it detects a
    redirect caused by the routing system when the request method is not
    GET, HEAD or OPTIONS.  Reasoning: form data will be dropped.
    """

    def __init__(self, request):
        exc = request.routing_exception
        message = ('A request was sent to this URL (%s) but a redirect was '
                   'issued automatically by the routing system to "%s".'
                   % (request.url, exc.new_url))

        # In case just a slash was appended we can be extra helpful
        if request.base_url + '/' == exc.new_url.split('?')[0]:
            message += (' The URL was defined with a trailing slash so '
                        'Flask will automatically redirect to the URL '
                        'with the trailing slash if it was accessed '
                        'without one.')

        message += (' Make sure to directly send your %s-request to this URL '
                    'since we can\'t make browsers or HTTP clients redirect '
                    'with form data reliably or without user interaction.' %
                    request.method)
        message += '\n\nNote: this exception is only raised in debug mode'
        AssertionError.__init__(self, message.encode('utf-8'))
def attach_enctype_error_multidict(request):
    """Since Flask 0.8 we're monkeypatching the files object in case a
    request is detected that does not use multipart form data but the files
    object is accessed.
    """
    original_cls = request.files.__class__

    class patched_cls(original_cls):
        def __getitem__(self, key):
            try:
                return original_cls.__getitem__(self, key)
            except KeyError:
                # A genuinely unknown key keeps the plain KeyError; a key
                # that *was* submitted as form data gets the helpful error.
                if key not in request.form:
                    raise
                raise DebugFilesKeyError(request, key)

    # Make the patched class indistinguishable from the original one.
    patched_cls.__name__ = original_cls.__name__
    patched_cls.__module__ = original_cls.__module__
    request.files.__class__ = patched_cls
| gpl-2.0 |
bpramod/azure-linux-extensions | DSC/azure/http/batchclient.py | 47 | 13769 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import sys
import uuid
from azure import (
_update_request_uri_query,
WindowsAzureError,
WindowsAzureBatchOperationError,
_get_children_from_path,
url_unquote,
_ERROR_CANNOT_FIND_PARTITION_KEY,
_ERROR_CANNOT_FIND_ROW_KEY,
_ERROR_INCORRECT_TABLE_IN_BATCH,
_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH,
_ERROR_DUPLICATE_ROW_KEY_IN_BATCH,
_ERROR_BATCH_COMMIT_FAIL,
)
from azure.http import HTTPError, HTTPRequest, HTTPResponse
from azure.http.httpclient import _HTTPClient
from azure.storage import (
_update_storage_table_header,
METADATA_NS,
_sign_storage_table_request,
)
from xml.dom import minidom
_DATASERVICES_NS = 'http://schemas.microsoft.com/ado/2007/08/dataservices'
if sys.version_info < (3,):
def _new_boundary():
return str(uuid.uuid1())
else:
def _new_boundary():
return str(uuid.uuid1()).encode('utf-8')
class _BatchClient(_HTTPClient):
    '''
    This is the class that is used for batch operation for storage table
    service. It only supports one changeset.
    '''

    def __init__(self, service_instance, account_key, account_name,
                 protocol='http'):
        _HTTPClient.__init__(self, service_instance, account_name=account_name,
                             account_key=account_key, protocol=protocol)
        # is_batch: True while a batch is open (between begin_batch() and
        # commit_batch()/cancel_batch()).
        self.is_batch = False
        # Requests queued for the single changeset, plus the table name,
        # partition key and row keys they have been validated against.
        self.batch_requests = []
        self.batch_table = ''
        self.batch_partition_key = ''
        self.batch_row_keys = []

    def get_request_table(self, request):
        '''
        Extracts table name from request.uri. The request.uri has either
        "/mytable(...)" or "/mytable" format.

        request: the request to insert, update or delete entity
        '''
        if '(' in request.path:
            pos = request.path.find('(')
            return request.path[1:pos]
        else:
            return request.path[1:]

    def get_request_partition_key(self, request):
        '''
        Extracts PartitionKey from request.body if it is a POST request or from
        request.path if it is not a POST request. Only insert operation request
        is a POST request and the PartitionKey is in the request body.

        request: the request to insert, update or delete entity
        '''
        if request.method == 'POST':
            doc = minidom.parseString(request.body)
            part_key = _get_children_from_path(
                doc, 'entry', 'content', (METADATA_NS, 'properties'),
                (_DATASERVICES_NS, 'PartitionKey'))
            if not part_key:
                raise WindowsAzureError(_ERROR_CANNOT_FIND_PARTITION_KEY)
            return part_key[0].firstChild.nodeValue
        else:
            # Non-POST: the key is embedded in the URI as
            # ...(PartitionKey='<value>',...); pull it out of the path.
            uri = url_unquote(request.path)
            pos1 = uri.find('PartitionKey=\'')
            pos2 = uri.find('\',', pos1)
            if pos1 == -1 or pos2 == -1:
                raise WindowsAzureError(_ERROR_CANNOT_FIND_PARTITION_KEY)
            return uri[pos1 + len('PartitionKey=\''):pos2]

    def get_request_row_key(self, request):
        '''
        Extracts RowKey from request.body if it is a POST request or from
        request.path if it is not a POST request. Only insert operation request
        is a POST request and the Rowkey is in the request body.

        request: the request to insert, update or delete entity
        '''
        if request.method == 'POST':
            doc = minidom.parseString(request.body)
            row_key = _get_children_from_path(
                doc, 'entry', 'content', (METADATA_NS, 'properties'),
                (_DATASERVICES_NS, 'RowKey'))
            if not row_key:
                raise WindowsAzureError(_ERROR_CANNOT_FIND_ROW_KEY)
            return row_key[0].firstChild.nodeValue
        else:
            # Non-POST: the key is embedded in the URI as RowKey='<value>').
            uri = url_unquote(request.path)
            pos1 = uri.find('RowKey=\'')
            pos2 = uri.find('\')', pos1)
            if pos1 == -1 or pos2 == -1:
                raise WindowsAzureError(_ERROR_CANNOT_FIND_ROW_KEY)
            row_key = uri[pos1 + len('RowKey=\''):pos2]
            return row_key

    def validate_request_table(self, request):
        '''
        Validates that all requests have the same table name. Set the table
        name if it is the first request for the batch operation.

        request: the request to insert, update or delete entity
        '''
        if self.batch_table:
            if self.get_request_table(request) != self.batch_table:
                raise WindowsAzureError(_ERROR_INCORRECT_TABLE_IN_BATCH)
        else:
            self.batch_table = self.get_request_table(request)

    def validate_request_partition_key(self, request):
        '''
        Validates that all requests have the same PartitiionKey. Set the
        PartitionKey if it is the first request for the batch operation.

        request: the request to insert, update or delete entity
        '''
        if self.batch_partition_key:
            if self.get_request_partition_key(request) != \
                self.batch_partition_key:
                raise WindowsAzureError(_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH)
        else:
            self.batch_partition_key = self.get_request_partition_key(request)

    def validate_request_row_key(self, request):
        '''
        Validates that all requests have the different RowKey and adds RowKey
        to existing RowKey list.

        request: the request to insert, update or delete entity
        '''
        if self.batch_row_keys:
            if self.get_request_row_key(request) in self.batch_row_keys:
                raise WindowsAzureError(_ERROR_DUPLICATE_ROW_KEY_IN_BATCH)
        else:
            self.batch_row_keys.append(self.get_request_row_key(request))

    def begin_batch(self):
        '''
        Starts the batch operation. Intializes the batch variables

        is_batch: batch operation flag.
        batch_table: the table name of the batch operation
        batch_partition_key: the PartitionKey of the batch requests.
        batch_row_keys: the RowKey list of adding requests.
        batch_requests: the list of the requests.
        '''
        self.is_batch = True
        self.batch_table = ''
        self.batch_partition_key = ''
        self.batch_row_keys = []
        self.batch_requests = []

    def insert_request_to_batch(self, request):
        '''
        Adds request to batch operation.

        request: the request to insert, update or delete entity
        '''
        self.validate_request_table(request)
        self.validate_request_partition_key(request)
        self.validate_request_row_key(request)
        self.batch_requests.append(request)

    def commit_batch(self):
        ''' Resets batch flag and commits the batch requests. '''
        if self.is_batch:
            self.is_batch = False
            self.commit_batch_requests()

    def commit_batch_requests(self):
        ''' Commits the batch requests.

        Serializes all queued requests into a single MIME multipart/mixed
        POST to /$batch (one changeset), signs and sends it, then parses the
        changeset response and raises on any failure.
        '''
        batch_boundary = b'batch_' + _new_boundary()
        changeset_boundary = b'changeset_' + _new_boundary()

        # Commits batch only if the requests list is not empty.
        if self.batch_requests:
            request = HTTPRequest()
            request.method = 'POST'
            request.host = self.batch_requests[0].host
            request.path = '/$batch'
            request.headers = [
                ('Content-Type', 'multipart/mixed; boundary=' + \
                    batch_boundary.decode('utf-8')),
                ('Accept', 'application/atom+xml,application/xml'),
                ('Accept-Charset', 'UTF-8')]

            request.body = b'--' + batch_boundary + b'\n'
            request.body += b'Content-Type: multipart/mixed; boundary='
            request.body += changeset_boundary + b'\n\n'

            content_id = 1

            # Adds each request body to the POST data.
            for batch_request in self.batch_requests:
                request.body += b'--' + changeset_boundary + b'\n'
                request.body += b'Content-Type: application/http\n'
                request.body += b'Content-Transfer-Encoding: binary\n\n'
                request.body += batch_request.method.encode('utf-8')
                request.body += b' http://'
                request.body += batch_request.host.encode('utf-8')
                request.body += batch_request.path.encode('utf-8')
                request.body += b' HTTP/1.1\n'
                request.body += b'Content-ID: '
                request.body += str(content_id).encode('utf-8') + b'\n'
                content_id += 1

                # Add different headers for different type requests.
                if not batch_request.method == 'DELETE':
                    request.body += \
                        b'Content-Type: application/atom+xml;type=entry\n'
                    for name, value in batch_request.headers:
                        if name == 'If-Match':
                            request.body += name.encode('utf-8') + b': '
                            request.body += value.encode('utf-8') + b'\n'
                            break
                    request.body += b'Content-Length: '
                    request.body += str(len(batch_request.body)).encode('utf-8')
                    request.body += b'\n\n'
                    request.body += batch_request.body + b'\n'
                else:
                    for name, value in batch_request.headers:
                        # If-Match should be already included in
                        # batch_request.headers, but in case it is missing,
                        # just add it.
                        if name == 'If-Match':
                            request.body += name.encode('utf-8') + b': '
                            request.body += value.encode('utf-8') + b'\n\n'
                            break
                    else:
                        request.body += b'If-Match: *\n\n'

            request.body += b'--' + changeset_boundary + b'--' + b'\n'
            request.body += b'--' + batch_boundary + b'--'

            request.path, request.query = _update_request_uri_query(request)
            request.headers = _update_storage_table_header(request)
            auth = _sign_storage_table_request(request,
                                               self.account_name,
                                               self.account_key)
            request.headers.append(('Authorization', auth))

            # Submit the whole request as batch request.
            response = self.perform_request(request)
            if response.status >= 300:
                # NOTE(review): self.respheader is not set anywhere in this
                # class -- presumably it is set by _HTTPClient; verify, else
                # this raise would fail with an AttributeError.
                raise HTTPError(response.status,
                                _ERROR_BATCH_COMMIT_FAIL,
                                self.respheader,
                                response.body)

            # http://www.odata.org/documentation/odata-version-2-0/batch-processing/
            # The body of a ChangeSet response is either a response for all the
            # successfully processed change request within the ChangeSet,
            # formatted exactly as it would have appeared outside of a batch,
            # or a single response indicating a failure of the entire ChangeSet.
            responses = self._parse_batch_response(response.body)
            if responses and responses[0].status >= 300:
                self._report_batch_error(responses[0])

    def cancel_batch(self):
        ''' Resets the batch flag. '''
        self.is_batch = False

    def _parse_batch_response(self, body):
        ''' Split the multipart body on the changeset boundary and parse each
        embedded HTTP response. Returns a list of HTTPResponse objects. '''
        parts = body.split(b'--changesetresponse_')
        responses = []
        for part in parts:
            httpLocation = part.find(b'HTTP/')
            if httpLocation > 0:
                response = self._parse_batch_response_part(part[httpLocation:])
                responses.append(response)
        return responses

    def _parse_batch_response_part(self, part):
        ''' Parse one embedded HTTP response (status line, headers, body)
        into an HTTPResponse. '''
        lines = part.splitlines();

        # First line is the HTTP status/reason
        status, _, reason = lines[0].partition(b' ')[2].partition(b' ')

        # Followed by headers and body
        headers = []
        body = b''
        isBody = False
        for line in lines[1:]:
            if line == b'' and not isBody:
                # Blank line separates headers from body.
                isBody = True
            elif isBody:
                body += line
            else:
                headerName, _, headerVal = line.partition(b':')
                headers.append((headerName.lower(), headerVal))

        return HTTPResponse(int(status), reason.strip(), headers, body)

    def _report_batch_error(self, response):
        ''' Extract the OData error code and message from the failed
        changeset response and raise WindowsAzureBatchOperationError.
        Falls back to the raw XML when no <message> element is present. '''
        xml = response.body.decode('utf-8')
        doc = minidom.parseString(xml)

        n = _get_children_from_path(doc, (METADATA_NS, 'error'), 'code')
        code = n[0].firstChild.nodeValue if n and n[0].firstChild else ''

        n = _get_children_from_path(doc, (METADATA_NS, 'error'), 'message')
        message = n[0].firstChild.nodeValue if n and n[0].firstChild else xml

        raise WindowsAzureBatchOperationError(message, code)
| apache-2.0 |
ZTE-Dev/android_kernel_zte_p892e10 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    """Return a dict whose missing keys transparently become nested autodicts."""
    return defaultdict(autodict)
# Registries filled in by the define_* functions below:
# flag_fields[event][field] holds {'delim': str, 'values': {bit: name}};
# symbolic_fields[event][field] holds {'values': {value: name}}.
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
    # Record the delimiter used between the names of multiple set flags.
    flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
    # Map one flag bit of an event field to its symbolic name.
    flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
    # nothing to do, really: the nested mapping is created lazily by
    # autodict when define_symbolic_value() stores the first value.
    pass
def define_symbolic_value(event_name, field_name, value, field_str):
    # Map one exact value of an event field to its symbolic name.
    symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
    """Return the symbolic representation of a flags *value*, e.g. "A | B".

    Unknown events/fields yield "".  Bits without a registered name are
    silently ignored; a name registered for 0 is used only when *value* is 0.
    """
    string = ""

    if flag_fields[event_name][field_name]:
        print_delim = 0
        # Use sorted() rather than keys()/list.sort() so this also works on
        # Python 3, where dict.keys() returns a view with no sort() method.
        for idx in sorted(flag_fields[event_name][field_name]['values'].keys()):
            if not value and not idx:
                string += flag_fields[event_name][field_name]['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and flag_fields[event_name][field_name]['delim']:
                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
                string += flag_fields[event_name][field_name]['values'][idx]
                print_delim = 1
                # Clear the matched bits so compound masks are not re-matched.
                value &= ~idx

    return string
def symbol_str(event_name, field_name, value):
    """Return the symbolic name registered for *value*, or "" if none.

    A name registered for 0 is used only when *value* itself is 0.
    """
    string = ""

    if symbolic_fields[event_name][field_name]:
        # Use sorted() rather than keys()/list.sort() so this also works on
        # Python 3, where dict.keys() returns a view with no sort() method.
        for idx in sorted(symbolic_fields[event_name][field_name]['values'].keys()):
            if not value and not idx:
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
            if (value == idx):
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break

    return string
# irq-flag bit values mapped to the names used by trace_flag_str().
trace_flags = { 0x00: "NONE", \
                0x01: "IRQS_OFF", \
                0x02: "IRQS_NOSUPPORT", \
                0x04: "NEED_RESCHED", \
                0x08: "HARDIRQ", \
                0x10: "SOFTIRQ" }
def trace_flag_str(value):
    """Render the irq-flags bitmask *value* as a " | "-separated name list."""
    names = []

    for bit in trace_flags.keys():
        # "NONE" applies only when the whole mask is zero.
        if not value and not bit:
            names.append("NONE")
            break
        if bit and (value & bit) == bit:
            names.append(trace_flags[bit])
            value &= ~bit

    return " | ".join(names)
def taskState(state):
    """Map a scheduler task-state value to its one-letter code.

    Values outside the table yield "Unknown".
    """
    states = {
        0: "R",
        1: "S",
        2: "D",
        64: "DEAD",
    }
    return states.get(state, "Unknown")
class EventHeaders:
    """The common header fields carried by every traced event."""

    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm):
        self.cpu = common_cpu
        self.secs = common_secs
        self.nsecs = common_nsecs
        self.pid = common_pid
        self.comm = common_comm

    def ts(self):
        """Full timestamp in nanoseconds."""
        return self.secs * (10 ** 9) + self.nsecs

    def ts_format(self):
        """Timestamp as "seconds.microseconds" (microseconds unpadded)."""
        return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
Dev-Cloud-Platform/Dev-Cloud | dev_cloud/cc1/src/wi/tests/registration_test.py | 1 | 3217 | # -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
# -*- coding: utf-8 -*-
"""@package src.wi.tests.registration_test
@author Piotr Wójcik
@author Krzysztof Danielowski
@date 11.10.2012
"""
from wi.tests import WiTestCase
import unittest
import random
class RegistrationTests(WiTestCase, unittest.TestCase):
    """Selenium-driven tests of the /registration/register/ form."""

    def _fill_common_data(self, field_key=None, field_value=None):
        # Fill in the registration form with valid, randomised data and
        # submit it.  If field_key/field_value are given, that one field is
        # overwritten after the defaults so a test can inject a single
        # invalid or duplicate value.
        driver = self.driver
        self.base_url = self.TEST_SERVER
        driver.get(self.base_url + "/registration/register/")
        self.change_language()
        self.wait_for_text("//div[@id='content']/div[2]/div/div[5]/h2", ["Registration"])
        # Randomise the login so repeated runs don't collide.
        driver.find_element_by_id("id_login").clear()
        driver.find_element_by_id("id_login").send_keys("witest" + str(random.randint(1, 100000)))
        driver.find_element_by_id("id_first").clear()
        driver.find_element_by_id("id_first").send_keys("test")
        driver.find_element_by_id("id_last").clear()
        driver.find_element_by_id("id_last").send_keys("test")
        driver.find_element_by_id("id_organization").clear()
        driver.find_element_by_id("id_organization").send_keys("test")
        driver.find_element_by_id("id_email").clear()
        driver.find_element_by_id("id_email").send_keys("witest" + str(random.randint(1, 100000)) + "@witest.pl")
        driver.find_element_by_id("id_new_password").clear()
        driver.find_element_by_id("id_new_password").send_keys("test1")
        driver.find_element_by_id("id_password2").clear()
        driver.find_element_by_id("id_password2").send_keys("test1")
        if field_key is not None:
            driver.find_element_by_id(field_key).clear()
            driver.find_element_by_id(field_key).send_keys(field_value)
        driver.find_element_by_css_selector("input.big_button").click()

    # NOTE(review): skipped with an uninformative reason 'a' -- presumably
    # disabled deliberately (it creates a real account); confirm.
    @unittest.skip('a')
    def test_1_registration_success(self):
        # Happy path: valid data should land on the success page.
        driver = self.driver
        self._fill_common_data()
        self.assertEqual("Registration success - Registration - CC1", driver.title)

    def test_2_registration_login_duplicate(self):
        # Reusing an existing login must produce a duplicate-login error.
        self._fill_common_data("id_login", self.TEST_USER['login'])
        self.wait_for_text("//form[@id='registration-form']/fieldset/div/ul/li",
                           ["A user with that login already exists."])

    def test_3_registration_wrong_email(self):
        # A malformed email address (missing dot in domain) must be rejected.
        self._fill_common_data("id_email", "witest" + str(random.randint(1, 100000)) + "@witestpl")
        self.wait_for_text("//form[@id='registration-form']/fieldset/div/ul/li", ["Enter a valid email address."])
| apache-2.0 |
marcosbontempo/inatelos | poky-daisy/scripts/lib/mic/3rdparty/pykickstart/handlers/control.py | 10 | 50993 | #
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.version import *
from pykickstart.commands import *
# This map is keyed on kickstart syntax version as provided by
# pykickstart.version. Within each sub-dict is a mapping from command name
# to the class that handles it. This is an onto mapping - that is, multiple
# command names can map to the same class. However, the Handler will ensure
# that only one instance of each class ever exists.
commandMap = {
FC3: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.FC3_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.FC3_Bootloader,
"cdrom": method.FC3_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.FC3_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"driverdisk": driverdisk.FC3_DriverDisk,
"firewall": firewall.FC3_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC3_Reboot,
"harddrive": method.FC3_Method,
"ignoredisk": ignoredisk.FC3_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"langsupport": langsupport.FC3_LangSupport,
"lilo": bootloader.FC3_Bootloader,
"lilocheck": lilocheck.FC3_LiloCheck,
"logvol": logvol.FC3_LogVol,
"monitor": monitor.FC3_Monitor,
"mouse": mouse.FC3_Mouse,
"network": network.FC3_Network,
"nfs": method.FC3_Method,
"part": partition.FC3_Partition,
"partition": partition.FC3_Partition,
"poweroff": reboot.FC3_Reboot,
"raid": raid.FC3_Raid,
"reboot": reboot.FC3_Reboot,
"rootpw": rootpw.FC3_RootPw,
"selinux": selinux.FC3_SELinux,
"shutdown": reboot.FC3_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC3_Timezone,
"upgrade": upgrade.FC3_Upgrade,
"url": method.FC3_Method,
"vnc": vnc.FC3_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.FC3_XConfig,
"zerombr": zerombr.FC3_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on fc3
FC4: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.FC3_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.FC4_Bootloader,
"cdrom": method.FC3_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.FC3_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"driverdisk": driverdisk.FC4_DriverDisk,
"firewall": firewall.FC3_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC3_Reboot,
"harddrive": method.FC3_Method,
"ignoredisk": ignoredisk.FC3_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"langsupport": langsupport.FC3_LangSupport,
"logvol": logvol.FC4_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.FC3_Monitor,
"mouse": mouse.FC3_Mouse,
"network": network.FC4_Network,
"nfs": method.FC3_Method,
"part": partition.FC4_Partition,
"partition": partition.FC4_Partition,
"poweroff": reboot.FC3_Reboot,
"raid": raid.FC4_Raid,
"reboot": reboot.FC3_Reboot,
"rootpw": rootpw.FC3_RootPw,
"selinux": selinux.FC3_SELinux,
"shutdown": reboot.FC3_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC3_Timezone,
"upgrade": upgrade.FC3_Upgrade,
"url": method.FC3_Method,
"vnc": vnc.FC3_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.FC3_XConfig,
"zerombr": zerombr.FC3_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on fc4
FC5: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.FC3_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.FC4_Bootloader,
"cdrom": method.FC3_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.FC3_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"driverdisk": driverdisk.FC4_DriverDisk,
"firewall": firewall.FC3_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC3_Reboot,
"harddrive": method.FC3_Method,
"ignoredisk": ignoredisk.FC3_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"langsupport": langsupport.FC5_LangSupport,
"logvol": logvol.FC4_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.FC3_Monitor,
"mouse": mouse.FC3_Mouse,
"network": network.FC4_Network,
"nfs": method.FC3_Method,
"part": partition.FC4_Partition,
"partition": partition.FC4_Partition,
"poweroff": reboot.FC3_Reboot,
"raid": raid.FC5_Raid,
"reboot": reboot.FC3_Reboot,
"rootpw": rootpw.FC3_RootPw,
"selinux": selinux.FC3_SELinux,
"shutdown": reboot.FC3_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC3_Timezone,
"upgrade": upgrade.FC3_Upgrade,
"url": method.FC3_Method,
"vnc": vnc.FC3_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.FC3_XConfig,
"zerombr": zerombr.FC3_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on fc5
FC6: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.FC3_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.FC4_Bootloader,
"cdrom": method.FC6_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.FC3_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.FC4_DriverDisk,
"firewall": firewall.FC3_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC6_Reboot,
"harddrive": method.FC6_Method,
"ignoredisk": ignoredisk.FC3_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.FC6_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"langsupport": langsupport.FC5_LangSupport,
"logging": logging.FC6_Logging,
"logvol": logvol.FC4_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.FC6_Monitor,
"mouse": mouse.FC3_Mouse,
"multipath": multipath.FC6_MultiPath,
"network": network.FC6_Network,
"nfs": method.FC6_Method,
"part": partition.FC4_Partition,
"partition": partition.FC4_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.FC5_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.FC6_Repo,
"rootpw": rootpw.FC3_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"upgrade": upgrade.FC3_Upgrade,
"user": user.FC6_User,
"url": method.FC6_Method,
"vnc": vnc.FC6_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.FC6_XConfig,
"zerombr": zerombr.FC3_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on fc6
F7: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.FC3_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.FC4_Bootloader,
"cdrom": method.FC6_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.FC3_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.FC4_DriverDisk,
"firewall": firewall.FC3_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC6_Reboot,
"harddrive": method.FC6_Method,
"ignoredisk": ignoredisk.FC3_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.FC6_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.FC4_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.FC6_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.FC6_Network,
"nfs": method.FC6_Method,
"part": partition.FC4_Partition,
"partition": partition.FC4_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F7_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.FC6_Repo,
"rootpw": rootpw.FC3_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.FC3_Upgrade,
"url": method.FC6_Method,
"user": user.FC6_User,
"vnc": vnc.FC6_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.FC6_XConfig,
"zerombr": zerombr.FC3_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on f7
F8: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.FC3_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.F8_Bootloader,
"cdrom": method.FC6_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.FC4_DriverDisk,
"firewall": firewall.FC3_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC6_Reboot,
"harddrive": method.FC6_Method,
"ignoredisk": ignoredisk.F8_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.FC6_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.FC4_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.FC6_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.F8_Network,
"nfs": method.FC6_Method,
"part": partition.FC4_Partition,
"partition": partition.FC4_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F7_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.F8_Repo,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.FC3_Upgrade,
"url": method.FC6_Method,
"user": user.F8_User,
"vnc": vnc.FC6_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.FC6_XConfig,
"zerombr": zerombr.FC3_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on f8
F9: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F9_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.F8_Bootloader,
"cdrom": method.FC6_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.FC4_DriverDisk,
"firewall": firewall.F9_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC6_Reboot,
"harddrive": method.FC6_Method,
"ignoredisk": ignoredisk.F8_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.FC6_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.F9_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.FC6_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.F9_Network,
"nfs": method.FC6_Method,
"part": partition.F9_Partition,
"partition": partition.F9_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F9_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.F8_Repo,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.FC3_Upgrade,
"url": method.FC6_Method,
"user": user.F8_User,
"vnc": vnc.F9_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.F9_XConfig,
"zerombr": zerombr.F9_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on f9
F10: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F9_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.F8_Bootloader,
"cdrom": method.FC6_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.FC4_DriverDisk,
"firewall": firewall.F10_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC6_Reboot,
"harddrive": method.FC6_Method,
"ignoredisk": ignoredisk.F8_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.F10_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.F9_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.F10_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.F9_Network,
"nfs": method.FC6_Method,
"part": partition.F9_Partition,
"partition": partition.F9_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F9_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.F8_Repo,
"rescue": rescue.F10_Rescue,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.FC3_Upgrade,
"url": method.FC6_Method,
"user": user.F8_User,
"vnc": vnc.F9_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.F10_XConfig,
"zerombr": zerombr.F9_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on f10
F11: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F9_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.F8_Bootloader,
"cdrom": method.FC6_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.FC4_DriverDisk,
"firewall": firewall.F10_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC6_Reboot,
"harddrive": method.FC6_Method,
"ignoredisk": ignoredisk.F8_IgnoreDisk,
"install": upgrade.F11_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.F10_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.F9_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.F10_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.F9_Network,
"nfs": method.FC6_Method,
"part": partition.F11_Partition,
"partition": partition.F11_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F9_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.F11_Repo,
"rescue": rescue.F10_Rescue,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.F11_Upgrade,
"url": method.FC6_Method,
"user": user.F8_User,
"vnc": vnc.F9_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.F10_XConfig,
"zerombr": zerombr.F9_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on f11
F12: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F12_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.F12_Bootloader,
"cdrom": method.FC6_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.F12_DriverDisk,
"fcoe": fcoe.F12_Fcoe,
"firewall": firewall.F10_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"group": group.F12_Group,
"halt": reboot.FC6_Reboot,
"harddrive": method.FC6_Method,
"ignoredisk": ignoredisk.F8_IgnoreDisk,
"install": upgrade.F11_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.F10_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.F12_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.F10_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.F9_Network,
"nfs": method.FC6_Method,
"part": partition.F12_Partition,
"partition": partition.F12_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F12_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.F11_Repo,
"rescue": rescue.F10_Rescue,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.F11_Upgrade,
"url": method.FC6_Method,
"user": user.F12_User,
"vnc": vnc.F9_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.F10_XConfig,
"zerombr": zerombr.F9_ZeroMbr,
"zfcp": zfcp.F12_ZFCP,
},
# based on f12
F13: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F12_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.F12_Bootloader,
"cdrom": method.F13_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.F12_DriverDisk,
"fcoe": fcoe.F13_Fcoe,
"firewall": firewall.F10_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"group": group.F12_Group,
"halt": reboot.FC6_Reboot,
"harddrive": method.F13_Method,
"ignoredisk": ignoredisk.F8_IgnoreDisk,
"install": upgrade.F11_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.F10_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.F12_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.F10_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.F9_Network,
"nfs": method.F13_Method,
"part": partition.F12_Partition,
"partition": partition.F12_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F13_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.F13_Repo,
"rescue": rescue.F10_Rescue,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"sshpw": sshpw.F13_SshPw,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.F11_Upgrade,
"url": method.F13_Method,
"user": user.F12_User,
"vnc": vnc.F9_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.F10_XConfig,
"zerombr": zerombr.F9_ZeroMbr,
"zfcp": zfcp.F12_ZFCP,
},
# based on f13
F14: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F12_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.F14_Bootloader,
"cdrom": method.F14_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.F14_DriverDisk,
"fcoe": fcoe.F13_Fcoe,
"firewall": firewall.F14_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"group": group.F12_Group,
"halt": reboot.FC6_Reboot,
"harddrive": method.F14_Method,
"ignoredisk": ignoredisk.F14_IgnoreDisk,
"install": upgrade.F11_Upgrade,
"interactive": interactive.F14_Interactive,
"iscsi": iscsi.F10_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.F14_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.F10_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.F9_Network,
"nfs": method.F14_Method,
"part": partition.F14_Partition,
"partition": partition.F14_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F14_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.F14_Repo,
"rescue": rescue.F10_Rescue,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"sshpw": sshpw.F13_SshPw,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.F11_Upgrade,
"url": method.F14_Method,
"user": user.F12_User,
"vnc": vnc.F9_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.F14_XConfig,
"zerombr": zerombr.F9_ZeroMbr,
"zfcp": zfcp.F14_ZFCP,
},
# based on f14
F15: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F12_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.F15_Bootloader,
"cdrom": method.F14_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.F14_DriverDisk,
"fcoe": fcoe.F13_Fcoe,
"firewall": firewall.F14_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"group": group.F12_Group,
"halt": reboot.FC6_Reboot,
"harddrive": method.F14_Method,
"ignoredisk": ignoredisk.F14_IgnoreDisk,
"install": upgrade.F11_Upgrade,
"iscsi": iscsi.F10_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.F15_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.F10_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.F9_Network,
"nfs": method.F14_Method,
"part": partition.F14_Partition,
"partition": partition.F14_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F15_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.F15_Repo,
"rescue": rescue.F10_Rescue,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"sshpw": sshpw.F13_SshPw,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.F11_Upgrade,
"url": method.F14_Method,
"user": user.F12_User,
"vnc": vnc.F9_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.F14_XConfig,
"zerombr": zerombr.F9_ZeroMbr,
"zfcp": zfcp.F14_ZFCP,
},
# based on f15
F16: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F12_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.F15_Bootloader,
"cdrom": method.F14_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.F14_DriverDisk,
"fcoe": fcoe.F13_Fcoe,
"firewall": firewall.F14_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"group": group.F12_Group,
"halt": reboot.FC6_Reboot,
"harddrive": method.F14_Method,
"ignoredisk": ignoredisk.F14_IgnoreDisk,
"install": upgrade.F11_Upgrade,
"iscsi": iscsi.F10_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.F15_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.F10_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.F16_Network,
"nfs": method.F14_Method,
"part": partition.F14_Partition,
"partition": partition.F14_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F15_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.F15_Repo,
"rescue": rescue.F10_Rescue,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"sshpw": sshpw.F13_SshPw,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.F11_Upgrade,
"url": method.F14_Method,
"user": user.F12_User,
"vnc": vnc.F9_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.F14_XConfig,
"zerombr": zerombr.F9_ZeroMbr,
"zfcp": zfcp.F14_ZFCP,
},
# based on fc1
RHEL3: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.FC3_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.FC3_Bootloader,
"cdrom": method.FC3_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.FC3_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"driverdisk": driverdisk.FC3_DriverDisk,
"firewall": firewall.FC3_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC3_Reboot,
"harddrive": method.FC3_Method,
"ignoredisk": ignoredisk.FC3_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"langsupport": langsupport.FC3_LangSupport,
"lilo": bootloader.FC3_Bootloader,
"lilocheck": lilocheck.FC3_LiloCheck,
"logvol": logvol.FC3_LogVol,
"monitor": monitor.FC3_Monitor,
"mouse": mouse.RHEL3_Mouse,
"network": network.FC3_Network,
"nfs": method.FC3_Method,
"part": partition.FC3_Partition,
"partition": partition.FC3_Partition,
"poweroff": reboot.FC3_Reboot,
"raid": raid.FC3_Raid,
"reboot": reboot.FC3_Reboot,
"rootpw": rootpw.FC3_RootPw,
"shutdown": reboot.FC3_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC3_Timezone,
"upgrade": upgrade.FC3_Upgrade,
"url": method.FC3_Method,
"vnc": vnc.FC3_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.FC3_XConfig,
"zerombr": zerombr.FC3_ZeroMbr,
},
# based on fc3
RHEL4: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.FC3_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.FC3_Bootloader,
"cdrom": method.FC3_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.FC3_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"driverdisk": driverdisk.FC4_DriverDisk,
"firewall": firewall.FC3_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC3_Reboot,
"harddrive": method.FC3_Method,
"ignoredisk": ignoredisk.F8_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"langsupport": langsupport.FC3_LangSupport,
"lilo": bootloader.FC3_Bootloader,
"lilocheck": lilocheck.FC3_LiloCheck,
"logvol": logvol.FC3_LogVol,
"monitor": monitor.FC3_Monitor,
"mouse": mouse.FC3_Mouse,
"network": network.RHEL4_Network,
"nfs": method.FC3_Method,
"part": partition.FC3_Partition,
"partition": partition.FC3_Partition,
"poweroff": reboot.FC3_Reboot,
"raid": raid.FC3_Raid,
"reboot": reboot.FC3_Reboot,
"rootpw": rootpw.FC3_RootPw,
"selinux": selinux.FC3_SELinux,
"shutdown": reboot.FC3_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC3_Timezone,
"upgrade": upgrade.FC3_Upgrade,
"url": method.FC3_Method,
"vnc": vnc.FC3_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.FC3_XConfig,
"zerombr": zerombr.FC3_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on fc6
RHEL5: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F9_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.RHEL5_Bootloader,
"cdrom": method.FC6_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.FC3_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.F12_DriverDisk,
"firewall": firewall.FC3_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC6_Reboot,
"harddrive": method.FC6_Method,
"ignoredisk": ignoredisk.F8_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.FC6_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"key": key.RHEL5_Key,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"langsupport": langsupport.FC5_LangSupport,
"logging": logging.FC6_Logging,
"logvol": logvol.RHEL5_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.FC6_Monitor,
"mouse": mouse.FC3_Mouse,
"multipath": multipath.FC6_MultiPath,
"network": network.RHEL5_Network,
"nfs": method.FC6_Method,
"part": partition.RHEL5_Partition,
"partition": partition.RHEL5_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.RHEL5_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.FC6_Repo,
"rootpw": rootpw.FC3_RootPw,
"services": services.FC6_Services,
"selinux": selinux.FC3_SELinux,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"upgrade": upgrade.FC3_Upgrade,
"user": user.FC6_User,
"url": method.FC6_Method,
"vnc": vnc.FC6_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.FC6_XConfig,
"zerombr": zerombr.FC3_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on f13ish
RHEL6: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F12_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.RHEL6_Bootloader,
"cdrom": method.RHEL6_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.F12_DriverDisk,
"fcoe": fcoe.F13_Fcoe,
"firewall": firewall.F10_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"group": group.F12_Group,
"halt": reboot.FC6_Reboot,
"harddrive": method.RHEL6_Method,
"ignoredisk": ignoredisk.RHEL6_IgnoreDisk,
"install": upgrade.F11_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.F10_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.F12_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.F10_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.RHEL6_Network,
"nfs": method.RHEL6_Method,
"part": partition.F12_Partition,
"partition": partition.F12_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F13_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.RHEL6_Repo,
"rescue": rescue.F10_Rescue,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"sshpw": sshpw.F13_SshPw,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.F11_Upgrade,
"url": method.RHEL6_Method,
"user": user.F12_User,
"vnc": vnc.F9_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.F10_XConfig,
"zerombr": zerombr.F9_ZeroMbr,
"zfcp": zfcp.F12_ZFCP,
}
}
# This map is keyed on kickstart syntax version as provided by
# pykickstart.version. Within each sub-dict is a mapping from a data object
# name to the class that provides it. This is a bijective mapping - that is,
# each name maps to exactly one data class and all data classes have a name.
# More than one instance of each class is allowed to exist, however.
dataMap = {
    # Fedora Core releases.  A data class is only replaced when its behaviour
    # changed in that release, so most entries reuse classes from an earlier
    # version (e.g. FC3_VolGroupData survives unchanged throughout).
    FC3: {
        "DriverDiskData": driverdisk.FC3_DriverDiskData,
        "LogVolData": logvol.FC3_LogVolData,
        "NetworkData": network.FC3_NetworkData,
        "PartData": partition.FC3_PartData,
        "RaidData": raid.FC3_RaidData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.FC3_ZFCPData,
    },
    FC4: {
        "DriverDiskData": driverdisk.FC4_DriverDiskData,
        "LogVolData": logvol.FC4_LogVolData,
        "NetworkData": network.FC4_NetworkData,
        "PartData": partition.FC4_PartData,
        "RaidData": raid.FC4_RaidData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.FC3_ZFCPData,
    },
    FC5: {
        "DriverDiskData": driverdisk.FC4_DriverDiskData,
        "LogVolData": logvol.FC4_LogVolData,
        "NetworkData": network.FC4_NetworkData,
        "PartData": partition.FC4_PartData,
        "RaidData": raid.FC5_RaidData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.FC3_ZFCPData,
    },
    FC6: {
        "DriverDiskData": driverdisk.FC4_DriverDiskData,
        "DmRaidData": dmraid.FC6_DmRaidData,
        "IscsiData": iscsi.FC6_IscsiData,
        "LogVolData": logvol.FC4_LogVolData,
        "MultiPathData": multipath.FC6_MultiPathData,
        "NetworkData": network.FC6_NetworkData,
        "PartData": partition.FC4_PartData,
        "RaidData": raid.FC5_RaidData,
        "RepoData": repo.FC6_RepoData,
        "UserData": user.FC6_UserData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.FC3_ZFCPData,
    },
    F7: {
        "DriverDiskData": driverdisk.FC4_DriverDiskData,
        "DmRaidData": dmraid.FC6_DmRaidData,
        "IscsiData": iscsi.FC6_IscsiData,
        "LogVolData": logvol.FC4_LogVolData,
        "MultiPathData": multipath.FC6_MultiPathData,
        "NetworkData": network.FC6_NetworkData,
        "PartData": partition.FC4_PartData,
        "RaidData": raid.F7_RaidData,
        "RepoData": repo.FC6_RepoData,
        "UserData": user.FC6_UserData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.FC3_ZFCPData,
    },
    F8: {
        "DriverDiskData": driverdisk.FC4_DriverDiskData,
        "DeviceData": device.F8_DeviceData,
        "DmRaidData": dmraid.FC6_DmRaidData,
        "IscsiData": iscsi.FC6_IscsiData,
        "LogVolData": logvol.FC4_LogVolData,
        "MultiPathData": multipath.FC6_MultiPathData,
        "NetworkData": network.F8_NetworkData,
        "PartData": partition.FC4_PartData,
        "RaidData": raid.F7_RaidData,
        "RepoData": repo.F8_RepoData,
        "UserData": user.F8_UserData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.FC3_ZFCPData,
    },
    F9: {
        "DriverDiskData": driverdisk.FC4_DriverDiskData,
        "DeviceData": device.F8_DeviceData,
        "DmRaidData": dmraid.FC6_DmRaidData,
        "IscsiData": iscsi.FC6_IscsiData,
        "LogVolData": logvol.F9_LogVolData,
        "MultiPathData": multipath.FC6_MultiPathData,
        "NetworkData": network.F8_NetworkData,
        "PartData": partition.F9_PartData,
        "RaidData": raid.F9_RaidData,
        "RepoData": repo.F8_RepoData,
        "UserData": user.F8_UserData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.FC3_ZFCPData,
    },
    F10: {
        "DriverDiskData": driverdisk.FC4_DriverDiskData,
        "DeviceData": device.F8_DeviceData,
        "DmRaidData": dmraid.FC6_DmRaidData,
        "IscsiData": iscsi.F10_IscsiData,
        "LogVolData": logvol.F9_LogVolData,
        "MultiPathData": multipath.FC6_MultiPathData,
        "NetworkData": network.F8_NetworkData,
        "PartData": partition.F9_PartData,
        "RaidData": raid.F9_RaidData,
        "RepoData": repo.F8_RepoData,
        "UserData": user.F8_UserData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.FC3_ZFCPData,
    },
    F11: {
        "DriverDiskData": driverdisk.FC4_DriverDiskData,
        "DeviceData": device.F8_DeviceData,
        "DmRaidData": dmraid.FC6_DmRaidData,
        "IscsiData": iscsi.F10_IscsiData,
        "LogVolData": logvol.F9_LogVolData,
        "MultiPathData": multipath.FC6_MultiPathData,
        "NetworkData": network.F8_NetworkData,
        "PartData": partition.F11_PartData,
        "RaidData": raid.F9_RaidData,
        "RepoData": repo.F11_RepoData,
        "UserData": user.F8_UserData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.FC3_ZFCPData,
    },
    F12: {
        "DriverDiskData": driverdisk.F12_DriverDiskData,
        "DeviceData": device.F8_DeviceData,
        "DmRaidData": dmraid.FC6_DmRaidData,
        "FcoeData": fcoe.F12_FcoeData,
        "GroupData": group.F12_GroupData,
        "IscsiData": iscsi.F10_IscsiData,
        "LogVolData": logvol.F12_LogVolData,
        "MultiPathData": multipath.FC6_MultiPathData,
        "NetworkData": network.F8_NetworkData,
        "PartData": partition.F12_PartData,
        "RaidData": raid.F12_RaidData,
        "RepoData": repo.F11_RepoData,
        "UserData": user.F12_UserData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.F12_ZFCPData,
    },
    F13: {
        "DriverDiskData": driverdisk.F12_DriverDiskData,
        "DeviceData": device.F8_DeviceData,
        "DmRaidData": dmraid.FC6_DmRaidData,
        "FcoeData": fcoe.F13_FcoeData,
        "GroupData": group.F12_GroupData,
        "IscsiData": iscsi.F10_IscsiData,
        "LogVolData": logvol.F12_LogVolData,
        "MultiPathData": multipath.FC6_MultiPathData,
        "NetworkData": network.F8_NetworkData,
        "PartData": partition.F12_PartData,
        "RaidData": raid.F13_RaidData,
        "RepoData": repo.F13_RepoData,
        "SshPwData": sshpw.F13_SshPwData,
        "UserData": user.F12_UserData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.F12_ZFCPData,
    },
    F14: {
        "DriverDiskData": driverdisk.F14_DriverDiskData,
        "DeviceData": device.F8_DeviceData,
        "DmRaidData": dmraid.FC6_DmRaidData,
        "FcoeData": fcoe.F13_FcoeData,
        "GroupData": group.F12_GroupData,
        "IscsiData": iscsi.F10_IscsiData,
        "LogVolData": logvol.F14_LogVolData,
        "MultiPathData": multipath.FC6_MultiPathData,
        "NetworkData": network.F8_NetworkData,
        "PartData": partition.F14_PartData,
        "RaidData": raid.F14_RaidData,
        "RepoData": repo.F14_RepoData,
        "SshPwData": sshpw.F13_SshPwData,
        "UserData": user.F12_UserData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.F14_ZFCPData,
    },
    F15: {
        "DriverDiskData": driverdisk.F14_DriverDiskData,
        "DeviceData": device.F8_DeviceData,
        "DmRaidData": dmraid.FC6_DmRaidData,
        "FcoeData": fcoe.F13_FcoeData,
        "GroupData": group.F12_GroupData,
        "IscsiData": iscsi.F10_IscsiData,
        "LogVolData": logvol.F15_LogVolData,
        "MultiPathData": multipath.FC6_MultiPathData,
        "NetworkData": network.F8_NetworkData,
        "PartData": partition.F14_PartData,
        "RaidData": raid.F15_RaidData,
        "RepoData": repo.F15_RepoData,
        "SshPwData": sshpw.F13_SshPwData,
        "UserData": user.F12_UserData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.F14_ZFCPData,
    },
    # F16 only changes the network data class relative to F15.
    F16: {
        "DriverDiskData": driverdisk.F14_DriverDiskData,
        "DeviceData": device.F8_DeviceData,
        "DmRaidData": dmraid.FC6_DmRaidData,
        "FcoeData": fcoe.F13_FcoeData,
        "GroupData": group.F12_GroupData,
        "IscsiData": iscsi.F10_IscsiData,
        "LogVolData": logvol.F15_LogVolData,
        "MultiPathData": multipath.FC6_MultiPathData,
        "NetworkData": network.F16_NetworkData,
        "PartData": partition.F14_PartData,
        "RaidData": raid.F15_RaidData,
        "RepoData": repo.F15_RepoData,
        "SshPwData": sshpw.F13_SshPwData,
        "UserData": user.F12_UserData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.F14_ZFCPData,
    },
    # RHEL releases largely reuse the data classes of the contemporaneous
    # Fedora release, with RHEL-specific overrides where behaviour diverges
    # (e.g. RHEL4_NetworkData, RHEL5_LogVolData, RHEL6_RepoData).
    RHEL3: {
        "DriverDiskData": driverdisk.FC3_DriverDiskData,
        "LogVolData": logvol.FC3_LogVolData,
        "NetworkData": network.RHEL4_NetworkData,
        "PartData": partition.FC3_PartData,
        "RaidData": raid.FC3_RaidData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.FC3_ZFCPData,
    },
    RHEL4: {
        "DriverDiskData": driverdisk.FC4_DriverDiskData,
        "LogVolData": logvol.FC3_LogVolData,
        "NetworkData": network.RHEL4_NetworkData,
        "PartData": partition.FC3_PartData,
        "RaidData": raid.FC3_RaidData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.FC3_ZFCPData,
    },
    RHEL5: {
        "DriverDiskData": driverdisk.F12_DriverDiskData,
        "DmRaidData": dmraid.FC6_DmRaidData,
        "IscsiData": iscsi.FC6_IscsiData,
        "LogVolData": logvol.RHEL5_LogVolData,
        "MultiPathData": multipath.FC6_MultiPathData,
        "NetworkData": network.FC6_NetworkData,
        "PartData": partition.RHEL5_PartData,
        "RaidData": raid.RHEL5_RaidData,
        "RepoData": repo.FC6_RepoData,
        "UserData": user.FC6_UserData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.FC3_ZFCPData,
    },
    RHEL6: {
        "DriverDiskData": driverdisk.F12_DriverDiskData,
        "DeviceData": device.F8_DeviceData,
        "DmRaidData": dmraid.FC6_DmRaidData,
        "FcoeData": fcoe.F13_FcoeData,
        "GroupData": group.F12_GroupData,
        "IscsiData": iscsi.F10_IscsiData,
        "LogVolData": logvol.F12_LogVolData,
        "MultiPathData": multipath.FC6_MultiPathData,
        "NetworkData": network.RHEL6_NetworkData,
        "PartData": partition.F12_PartData,
        "RaidData": raid.F13_RaidData,
        "RepoData": repo.RHEL6_RepoData,
        "SshPwData": sshpw.F13_SshPwData,
        "UserData": user.F12_UserData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.F12_ZFCPData,
    }
}
| mit |
gustavovaliati/ci724-ppginfufpr-2016 | knn-02-06/knn-cuda_notworking_yet.py | 1 | 7979 | #!/usr/bin/python
import datetime, argparse, sys, os
import numpy as np
from multiprocessing import Pool
import pycuda.driver as drv
import pycuda.tools
import pycuda.autoinit
import numpy.linalg as la
from pycuda.compiler import SourceModule
import pycuda.gpuarray as gpuarray
# Command line interface: paths to the training/testing datasets, K for the
# KNN vote, and optional caps on how many samples of each set are used.
ap = argparse.ArgumentParser()
ap.add_argument("-tr", "--train", required = True, help = "Is the training dataset path.")
ap.add_argument("-te", "--test", required = True, help = "Is the testing dataset path.")
ap.add_argument("-k", required = True, help = "Is K for the KNN algorithm.")
ap.add_argument("-lte", "--limit-test", required = False, help = "Sets a limit for how many testing sets must be used instead of the whole file.")
ap.add_argument("-ltr", "--limit-train", required = False, help = "Sets a limit for how many training sets must be used instead of the whole file.")
args = vars(ap.parse_args())

train_file_path = args["train"]
test_file_path = args["test"]
k_number = int(args["k"])

# False means "no limit"; argparse exposes --limit-test under the key
# "limit_test" (dashes become underscores).
test_calculation_limit = False
test_calculation_limit_arg = args["limit_test"]
if (test_calculation_limit_arg):
    test_calculation_limit = int(test_calculation_limit_arg)
else:
    print "Be aware you didn't set a limit for the testing set. We are going to test all."

train_calculation_limit = False
train_calculation_limit_arg = args["limit_train"]
if (train_calculation_limit_arg):
    train_calculation_limit = int(train_calculation_limit_arg)
else:
    print "Be aware you didn't set a limit for the training set. We are going to use it all."
############
# STATIC PARAMETERS
############
classes = 10 #todo remove harded coded.
# Rows are indexed by the real class, columns by the guessed class (see the
# main loop below).  `dtype=int` replaces the removed numpy alias `np.int`
# (deprecated in numpy 1.20, removed in 1.24); it is the same type.
confusion_matrix = np.zeros((classes,classes), dtype=int)
result_error = 0        # misclassified samples
result_rejection = 0    # samples rejected because the KNN vote was tied
total_testing = 0
total_training = 0
process_number = 4      # size of the multiprocessing worker pool
############
#LOAD TRAINING FILE
############
train_file = open(train_file_path, "r")
print "Reading file: ", train_file_path
header = train_file.readline().split(" ")
train_number_lines = int(header[0])
number_features = int(header[1])
print "Lines {} | Features {}".format(train_number_lines, number_features)
if train_calculation_limit:
print "We are limiting to {} training sets.".format(train_calculation_limit)
if train_number_lines > train_calculation_limit:
total_training = train_calculation_limit
else:
print "\nERROR: the training limit is bigger than the actual number of testing sets."
sys.exit()
else:
total_training = train_number_lines
train_features = []
train_real_class = []
train_guessed_class = []
for train_index, features in enumerate(train_file):
if train_calculation_limit and train_index >= train_calculation_limit:
break
features = features.split(" ")
features_class = features.pop(number_features)
# features = np.array(map(float, features))
features = np.array(features, dtype=np.float32)
features_class = int(features_class.replace("\n",""))
train_features.append(features)
train_real_class.append(features_class)
############
#LOAD TEST FILE
############
# Same layout as the training file: header "<sample count> <feature count>",
# then one line per sample with the class label as the last field.
test_file = open(test_file_path, "r")
print "Reading file: ", test_file_path
header = test_file.readline().split(" ")
test_number_lines = int(header[0])
number_features = int(header[1])
print "Lines {} | Features {}".format(test_number_lines, number_features)
if test_calculation_limit:
    print "We are limiting to {} testing sets.".format(test_calculation_limit)
    if test_number_lines > test_calculation_limit:
        total_testing = test_calculation_limit
    else:
        print "\nERROR: the testing limit is bigger than the actual number of testing sets."
        sys.exit()
else:
    total_testing = test_number_lines
test_features = []
test_real_class = []
test_guessed_class = []
# NOTE(review): test_processed_lines is never updated or read afterwards.
test_processed_lines = 0
for test_index, features in enumerate(test_file):
    # Stop early when a testing-set limit was requested.
    if test_calculation_limit and test_index >= test_calculation_limit:
        break
    features = features.split(" ")
    features_class = features.pop(number_features)
    # features = np.array(map(float, features))
    features = np.array(features, dtype=np.float32)
    features_class = int(features_class.replace("\n",""))
    test_features.append(features)
    test_real_class.append(features_class)
############
# CALCULATION
############
def print_summary(tested):
valid_total = tested - result_rejection
time_end = datetime.datetime.now()
print "Calculation time: {}".format(time_end - time_start)
if valid_total > 0:
correct = (valid_total - result_error) * 100.0 / valid_total
else:
correct = 0.0
print "Tested {} | Error: {} | Rejection {} | Correct {} %".format(tested, result_error, result_rejection, correct)
print confusion_matrix
def calc_distance(test_feat_index, train_feat_index):
    """Return the squared Euclidean distance between one test and one train
    feature vector, computed on the GPU.

    Bug fixes versus the previous version:
    - the function ignored its arguments and always compared two hard-coded
      3-element vectors;
    - ``pycuda.gpuarray.sum`` was called on a host ndarray (the result of
      ``.get()``) instead of on the device array.
    """
    a_gpu = gpuarray.to_gpu(test_features[test_feat_index])
    b_gpu = gpuarray.to_gpu(train_features[train_feat_index])
    # Reduce on the device, then copy the single scalar back to the host.
    return gpuarray.sum((a_gpu - b_gpu) ** 2).get()
def calc_train(start, end, test_feat_index):
    """Scan training vectors [start, end) against one test vector and keep
    the k nearest.

    Returns (dictionary, ranking): ``ranking`` is the sorted float32 array of
    the k smallest distances found in this slice, and ``dictionary`` maps each
    kept distance to the class label of the corresponding training sample.

    NOTE(review): keying by raw distance means two samples at exactly the
    same distance collide in the dict; unlikely with float features, but
    worth confirming.
    """
    current_ranking = np.zeros(0, dtype=np.float32)
    dictionary = {}
    for index in range(start, end):
        distance = calc_distance(test_feat_index, index)
        if current_ranking.size >= k_number:
            # Bug fix: only displace the current worst neighbour when the new
            # distance is actually smaller.  The old code unconditionally
            # dropped the k-th entry and inserted the new distance, so a far
            # sample could evict a nearer one.
            if distance >= current_ranking[k_number - 1]:
                continue
            current_ranking = np.delete(current_ranking, k_number - 1, 0)
        current_ranking = np.append(current_ranking, distance)
        dictionary[distance] = train_real_class[index]
        current_ranking = np.sort(current_ranking, kind="mergesort")
    # Restrict the distance->class map to the distances that survived.
    new_dic = {}
    for r in current_ranking:
        new_dic[r] = dictionary[r]
    return new_dic, current_ranking
time_start = datetime.datetime.now()
# Split the training set into `process_number` contiguous slices; each worker
# ranks the k nearest neighbours within its own slice, and the slices'
# rankings are merged afterwards.
# NOTE(review): integer division drops the last total_training %
# process_number samples from the scan entirely -- confirm this is intended.
offset = int(total_training / process_number)
pool = Pool(processes=process_number)
for test_index, test_feat in enumerate(test_features):
    start = 0
    workers = []
    for i in range(process_number):
        end = start+offset
        worker = pool.apply_async(calc_train, (start, end, test_index))
        workers.append(worker)
        start = end
    # print "workers",workers
    # Merge the per-slice results and keep the globally k smallest distances.
    k_ranking_dict = {}
    ranking = []
    for worker in workers:
        d, r = worker.get()
        ranking = np.concatenate((ranking, r))
        k_ranking_dict.update(d)
    ranking = np.sort(ranking, kind="mergesort")
    ranking = ranking[0:k_number]
    # print "here",ranking, k_ranking_dict
    # Majority vote over the classes of the k nearest neighbours.
    to_count_array = []
    for r in ranking:
        # print "k_ranking_dict[key]",k_ranking_dict[key]
        to_count_array.append(k_ranking_dict[r])
    counting = np.bincount(to_count_array)
    guessed_class = np.argmax(counting)
    guessed_counter = counting[guessed_class]
    counting = np.delete(counting, guessed_class)
    # Reject the sample when the winning class is tied with another class.
    if guessed_counter in counting:
        result_rejection = result_rejection + 1
        continue
    real_class = test_real_class[test_index]
    confusion_matrix[real_class,guessed_class] = confusion_matrix[real_class,guessed_class] + 1
    # print real_class, guessed_class
    if real_class != guessed_class:
        result_error = result_error + 1
    # Progress report after every processed test sample.
    print_summary(test_index+1)

############
# END - PRESENT RESULTS
############
print "\n FINISHED. Final summary: \n"
tested = len(test_features)
print_summary(tested)
| gpl-3.0 |
RichardLitt/wyrd-django-dev | django/utils/encoding.py | 4 | 9883 | from __future__ import unicode_literals
import codecs
import datetime
from decimal import Decimal
import locale
try:
from urllib.parse import quote
except ImportError: # Python 2
from urllib import quote
import warnings
from django.utils.functional import Promise
from django.utils import six
class DjangoUnicodeDecodeError(UnicodeDecodeError):
    """UnicodeDecodeError that also remembers the object being converted."""

    def __init__(self, obj, *args):
        self.obj = obj
        UnicodeDecodeError.__init__(self, *args)

    def __str__(self):
        base = UnicodeDecodeError.__str__(self)
        return '%s. You passed in %r (%s)' % (base, self.obj, type(self.obj))
class StrAndUnicode(object):
    """
    Deprecated mix-in deriving __str__ from __unicode__.

    On Python 2, __str__ returns the UTF-8 encoded output of __unicode__;
    on Python 3 it returns __unicode__'s output unchanged.  Kept only for
    backwards compatibility -- define __str__ and apply the
    @python_2_unicode_compatible decorator instead.
    """
    def __init__(self, *args, **kwargs):
        warnings.warn("StrAndUnicode is deprecated. Define a __str__ method "
                      "and apply the @python_2_unicode_compatible decorator "
                      "instead.", PendingDeprecationWarning, stacklevel=2)
        super(StrAndUnicode, self).__init__(*args, **kwargs)

    def __str__(self):
        # six.PY3 is constant for the life of the process, so checking it at
        # call time is observably identical to selecting the method at class
        # creation time.
        text = self.__unicode__()
        return text if six.PY3 else text.encode('utf-8')
def python_2_unicode_compatible(klass):
    """
    Class decorator for single-code-base Python 2/3 support.

    Apply to a class whose __str__ returns text: under Python 2 the decorator
    moves that method to __unicode__ and installs a __str__ that UTF-8
    encodes it; under Python 3 the class is returned untouched.
    """
    if six.PY3:
        return klass
    klass.__unicode__ = klass.__str__
    klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
def smart_text(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Return a text version of *s* -- unicode on Python 2, str on Python 3 --
    decoding bytestrings with *encoding*.

    Lazy translation promises (gettext_lazy() results) pass through
    unresolved; with strings_only=True certain non-string primitives are
    preserved as-is.
    """
    return s if isinstance(s, Promise) else force_text(s, encoding, strings_only, errors)
def is_protected_type(obj):
    """Return True if *obj* is of a "protected" type.

    Protected-type instances are preserved as-is when passed to
    force_text(strings_only=True) instead of being converted to text.
    """
    protected = six.integer_types + (
        type(None), float, Decimal,
        datetime.datetime, datetime.date, datetime.time,
    )
    return isinstance(obj, protected)
def force_text(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_text, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.

    If strings_only is True, don't convert (some) non-string-like objects.

    Raises DjangoUnicodeDecodeError (a UnicodeDecodeError subclass that also
    carries the offending object) when a bytestring cannot be decoded.
    """
    # Handle the common case first, saves 30-40% when s is an instance of
    # six.text_type. This function gets called often in that setting.
    if isinstance(s, six.text_type):
        return s
    if strings_only and is_protected_type(s):
        return s
    try:
        if not isinstance(s, six.string_types):
            # Non-string object: prefer its __unicode__ if it defines one.
            if hasattr(s, '__unicode__'):
                s = s.__unicode__()
            else:
                try:
                    if six.PY3:
                        if isinstance(s, bytes):
                            s = six.text_type(s, encoding, errors)
                        else:
                            s = six.text_type(s)
                    else:
                        # Python 2: go through bytes() so the decode honours
                        # `encoding`/`errors`.
                        s = six.text_type(bytes(s), encoding, errors)
                except UnicodeEncodeError:
                    if not isinstance(s, Exception):
                        raise
                    # If we get to here, the caller has passed in an Exception
                    # subclass populated with non-ASCII data without special
                    # handling to display as a string. We need to handle this
                    # without raising a further exception. We do an
                    # approximation to what the Exception's standard str()
                    # output should be.
                    s = ' '.join([force_text(arg, encoding, strings_only,
                            errors) for arg in s])
        else:
            # Note: We use .decode() here, instead of six.text_type(s, encoding,
            # errors), so that if s is a SafeBytes, it ends up being a
            # SafeText at the end.
            s = s.decode(encoding, errors)
    except UnicodeDecodeError as e:
        if not isinstance(s, Exception):
            raise DjangoUnicodeDecodeError(s, *e.args)
        else:
            # If we get to here, the caller has passed in an Exception
            # subclass populated with non-ASCII bytestring data without a
            # working unicode method. Try to handle this without raising a
            # further exception by individually forcing the exception args
            # to unicode.
            s = ' '.join([force_text(arg, encoding, strings_only,
                    errors) for arg in s])
    return s
def smart_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Return a bytestring version of *s*, encoded as specified in *encoding*.

    Lazy translation promises (gettext_lazy() results) pass through
    unresolved; with strings_only=True certain non-string primitives are
    preserved as-is.
    """
    return s if isinstance(s, Promise) else force_bytes(s, encoding, strings_only, errors)
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_bytes, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    if isinstance(s, bytes):
        if encoding == 'utf-8':
            return s
        else:
            # Re-encoding to another charset assumes the input bytes are
            # UTF-8 -- that is what this decode states.
            return s.decode('utf-8', errors).encode(encoding, errors)
    if strings_only and (s is None or isinstance(s, int)):
        return s
    if isinstance(s, Promise):
        # Lazy translations are resolved here (unlike in force_text).
        return six.text_type(s).encode(encoding, errors)
    if not isinstance(s, six.string_types):
        try:
            if six.PY3:
                # NOTE(review): `errors` is not passed on this path, unlike
                # every other encode in this function -- confirm intended.
                return six.text_type(s).encode(encoding)
            else:
                return bytes(s)
        except UnicodeEncodeError:
            if isinstance(s, Exception):
                # An Exception subclass containing non-ASCII data that doesn't
                # know how to print itself properly. We shouldn't raise a
                # further exception.
                return b' '.join([force_bytes(arg, encoding, strings_only,
                        errors) for arg in s])
            return six.text_type(s).encode(encoding, errors)
    else:
        return s.encode(encoding, errors)
# Version-dependent aliases: smart_str/force_str always produce the native
# `str` type of the running interpreter.
if six.PY3:
    smart_str = smart_text
    force_str = force_text
else:
    smart_str = smart_bytes
    force_str = force_bytes
    # backwards compatibility for Python 2
    smart_unicode = smart_text
    force_unicode = force_text

smart_str.__doc__ = """\
Apply smart_text in Python 3 and smart_bytes in Python 2.
This is suitable for writing to sys.stdout (for instance).
"""

force_str.__doc__ = """\
Apply force_text in Python 3 and force_bytes in Python 2.
"""
def iri_to_uri(iri):
    """
    Convert an Internationalized Resource Identifier (IRI) portion to a URI
    portion suitable for inclusion in a URL.

    This follows the algorithm of section 3.1 of RFC 3987, simplified because
    the input is assumed to be UTF-8 bytes or unicode already.  Returns an
    ASCII string containing the encoded result; None passes through.
    """
    if iri is None:
        return iri
    # The safe set is the union of RFC 3986's "reserved" (gen-delims +
    # sub-delims) and "unreserved" characters that urllib would otherwise
    # escape, plus '%', which the end of RFC 3987 section 3.1 says must not
    # be re-encoded.
    return quote(force_bytes(iri), safe=b"/#%[]=:;$&()+,!?*@'~")
def filepath_to_uri(path):
    """
    Convert a file system path to a URI portion suitable for inclusion in a
    URL, assuming the input is UTF-8 bytes or unicode already.

    Backslashes are normalised to forward slashes and special characters are
    percent-encoded.  The ' character is deliberately left unencoded, as it
    is valid within URIs (compare JavaScript's encodeURIComponent()).
    Returns an ASCII string containing the encoded result; None passes
    through.
    """
    if path is None:
        return path
    # os.sep/os.altsep are intentionally not consulted; the separators stay
    # hardcoded to keep some flexibility.
    return quote(force_bytes(path.replace("\\", "/")), safe=b"/~!*()'")
# The encoding of the default system locale, falling back to the given
# fallback encoding if the encoding is unsupported by python or could
# not be determined. See tickets #10335 and #5846.
try:
    DEFAULT_LOCALE_ENCODING = locale.getdefaultlocale()[1] or 'ascii'
    codecs.lookup(DEFAULT_LOCALE_ENCODING)
except Exception:
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are not
    # swallowed at import time; getdefaultlocale() may raise ValueError and
    # codecs.lookup() raises LookupError for unknown codecs.
    DEFAULT_LOCALE_ENCODING = 'ascii'
Universal-Model-Converter/UMC3.0a | data/Python/x86/Lib/site-packages/pygame/tests/run_tests__tests/exclude/invisible_tag_test.py | 18 | 1163 | __tags__ = ['invisible']
if __name__ == '__main__':
    # Run stand-alone: walk three directory levels up from this file to find
    # the package root, then decide whether we live inside the pygame
    # distribution ("pygame/tests") or in a detached "test" package.
    import sys
    import os
    pkg_dir = (os.path.split(
               os.path.split(
               os.path.split(
                   os.path.abspath(__file__))[0])[0])[0])
    parent_dir, pkg_name = os.path.split(pkg_dir)
    is_pygame_pkg = (pkg_name == 'tests' and
                     os.path.split(parent_dir)[1] == 'pygame')
    if not is_pygame_pkg:
        # Detached layout: make the parent importable so `test` resolves.
        sys.path.insert(0, parent_dir)
else:
    # Imported as a module: infer the layout from our dotted module name.
    is_pygame_pkg = __name__.startswith('pygame.tests.')

# Pull test_utils/unittest from whichever package layout applies.
if is_pygame_pkg:
    from pygame.tests import test_utils
    from pygame.tests.test_utils import unittest
else:
    from test import test_utils
    from test.test_utils import unittest
class KeyModuleTest(unittest.TestCase):
    """Placeholder suite used by the run-tests tag machinery.

    The module-level __tags__ marks this file 'invisible', so the runner is
    expected to exclude it; every case here is a trivial pass.

    `self.assert_` was replaced by `self.assertTrue`: assert_ is a
    deprecated alias that was removed from unittest in Python 3.12.
    """

    def test_get_focused(self):
        self.assertTrue(True)

    def test_get_mods(self):
        self.assertTrue(True)

    def test_get_pressed(self):
        self.assertTrue(True)

    def test_name(self):
        self.assertTrue(True)

    def test_set_mods(self):
        self.assertTrue(True)

    def test_set_repeat(self):
        self.assertTrue(True)
if __name__ == '__main__':
    # Allow running this file directly with the stock unittest runner.
    unittest.main()
| mit |
pilnujemy/pytamy | foundation/correspondence/views/letter.py | 2 | 2369 | from django.views.generic import DeleteView, DetailView, UpdateView, CreateView
from django.forms.models import BaseInlineFormSet
from django.forms.models import inlineformset_factory
from braces.views import LoginRequiredMixin, UserFormKwargsMixin, SelectRelatedMixin
from ..models import Letter, Attachment
from ..forms import LetterForm
from ..filters import LetterFilter
from .mixins import (InitialFormMixin, CreateFormMessagesMixin,
UpdateFormMessagesMixin, DeletedMessageMixin, CreateFormsetView, UpdateFormsetView)
from ..forms import AttachmentForm
from django_filters.views import FilterView
from crispy_forms.helper import FormHelper
from django.core.urlresolvers import reverse
class FormsetHelper(FormHelper):
    # Crispy-forms helper shared by the inline formsets below.  They render
    # inside the parent letter form, so no extra <form> tag is emitted.
    form_tag = False
    form_method = 'post'
class TableInlineHelper(FormsetHelper):
    # Same settings as FormsetHelper, rendered via the bootstrap
    # table-inline formset template.
    template = 'bootstrap/table_inline_formset.html'
def formset_attachment_factory(form_formset=None, *args, **kwargs):
    """Build an inline formset class linking Attachment rows to a Letter.

    When no formset base class is supplied, a default BaseInlineFormSet
    subclass carrying the crispy table-inline helper is used.  Extra
    positional/keyword arguments are forwarded to inlineformset_factory().
    """
    if form_formset is None:
        class DefaultAttachmentFormSet(BaseInlineFormSet):
            helper = TableInlineHelper()
        form_formset = DefaultAttachmentFormSet
    return inlineformset_factory(Letter, Attachment, form=AttachmentForm,
                                 formset=form_formset, *args, **kwargs)
# Default attachment formset used by the create/update views below.
AttachmentFormSet = formset_attachment_factory()
class LetterDetailView(SelectRelatedMixin, DetailView):
    # Single-letter page; select_related avoids extra per-FK queries.
    model = Letter
    select_related = ["created_by", "modified_by", "contact"]
class LetterListView(SelectRelatedMixin, FilterView):
    # Filterable letter listing; django-filter drives the query form.
    model = Letter
    filterset_class = LetterFilter
    select_related = ["created_by", "modified_by", "contact", ]
class LetterCreateView(LoginRequiredMixin, CreateFormMessagesMixin, UserFormKwargsMixin,
                       InitialFormMixin, CreateFormsetView, CreateView):
    # Letter creation with an inline attachment formset; requires login and
    # passes the current user into the form via UserFormKwargsMixin.
    model = Letter
    form_class = LetterForm
    formset_class = {'attachment_form': AttachmentFormSet}
class LetterDeleteView(DeletedMessageMixin, DeleteView):
    model = Letter

    def get_success_message(self):
        # Use the letter object itself in the "deleted" flash message.
        return self.object

    def get_success_url(self):
        # After deletion, fall back to the contacts listing.
        return reverse('correspondence:contact_list')
class LetterUpdateView(LoginRequiredMixin, UpdateFormMessagesMixin, UserFormKwargsMixin,
                       UpdateFormsetView, UpdateView):
    # Letter editing with the same inline attachment formset as creation.
    model = Letter
    form_class = LetterForm
    formset_class = {'attachment_form': AttachmentFormSet}
| bsd-3-clause |
rysson/filmkodi | plugin.video.mrknowtv/resources/lib/indexers/phstreams.py | 2 | 28077 | # -*- coding: utf-8 -*-
'''
Phoenix Add-on
Copyright (C) 2015 Blazetamer
Copyright (C) 2016 mrknow
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os,re,sys,urllib,urlparse
from resources.lib.libraries import cache
from resources.lib.libraries import cachemeta
from resources.lib.libraries import control
from resources.lib.libraries import client
from resources.lib.libraries import workers
from resources.lib.libraries import views
# Remote XML menu consumed by getDirectory(); defines the add-on's root menu.
phLink = 'http://mecca.watchkodi.com/phstreams.xml'
# Search endpoint template; %s is filled with the search host.
phSearch = 'http://%s/search/search.xml'
# Optional local XML in the add-on data dir that exposes a "Testings" menu.
phTest = 'testings.xml'
def getCategory():
    # Root menu: render the remote XML first, then append the static entries.
    getDirectory('0', phLink, '0', '0', '0', '0', '0', close=False)
    addCategoryItem('NHL', 'nhlDirectory', 'hockey.jpg')
    addCategoryItem(control.lang(30701).encode('utf-8'), 'openSettings', 'settings.png')
    addCategoryItem(control.lang(30721).encode('utf-8'), 'downloader', 'downloader.png')
    addCategoryItem(control.lang(30702).encode('utf-8'), 'search', 'search.png')
    # Only surface the test menu when a local testings.xml exists.
    if phTest in control.listDir(control.dataPath)[1]:
        addCategoryItem('Testings', 'localDirectory', 'home.png')
    endCategory()
def localDirectory():
    # Render the local testings.xml from the add-on profile directory.
    getDirectory('0', os.path.join(control.dataPath, phTest), '0', '0', '0', '0', '0', local=True)
def getDirectory(name, url, audio, image, fanart, playable, content, close=True, local=False):
if local == True:
f = control.openFile(url) ; result = f.read() ; f.close()
else:
result = cache.get(client.request, 0, url)
result = str(result).replace('\r','').replace('\n','').replace('\t','').replace(' ','')
try: fanart = re.findall('<fanart>(.+?)</fanart>', result)[0]
except: fanart = '0'
try:
notify = re.compile('<notify>(.+?)</notify>').findall(result)[0]
vip = re.findall('<poster>(.+?)</poster>', result)[0]
if not re.search('[a-zA-Z]', vip): raise Exception()
def message(vip): return (vip+version)
check = cache.get(message, 600000000, vip, table='rel_vip')
version = re.findall('<new>(.+?)</new>', notify)[0]
if not version.isdigit(): raise Exception()
if check == (vip+version): raise Exception()
title = '[B]Announcement From %s![/B]' % vip
msg1 = re.findall('<message1>(.+?)</message1>', notify)[0]
msg2 = re.findall('<message2>(.+?)</message2>', notify)[0]
msg3 = re.findall('<message3>(.+?)</message3>', notify)[0]
check = cache.get(message, 0, vip, table='rel_vip')
control.dialog.ok(str(title), str(msg1), str(msg2), str(msg3))
except:
pass
infos = re.compile('<info>(.+?)</info>').findall(result)
for info in infos:
try:
name = re.findall('<message>(.+?)</message>', info)[0]
try: image = re.findall('<thumbnail>(.+?)</thumbnail>', info)[0]
except: image = '0'
addDirectoryItem(name, '0', '0', image, image, fanart, '0', '0', {})
except:
pass
popups = re.compile('<popup>(.+?)</popup>').findall(result)
for popup in popups:
try:
name = re.findall('<name>(.+?)</name>', popup)[0]
url = re.findall('<popImage>(.+?)</popImage>', popup)[0]
try: image = re.findall('<thumbnail>(.+?)</thumbnail>', popup)[0]
except: image = '0'
try: audio = re.findall('<sound>(.+?)</sound>', popup)[0]
except: audio = '0'
addDirectoryItem(name, url, 'openDialog', image, image, fanart, audio, '0', {})
except:
pass
special = re.compile('<name>([^<]+)</name><link>([^<]+)</link><thumbnail>([^<]+)</thumbnail><date>([^<]+)</date>').findall(result)
for name, url, image, date in special:
if re.search(r'\d+', date): name += ' [COLOR red] Updated %s[/COLOR]' % date
addDirectoryItem(name, url, 'ndmode', image, image, fanart, '0', '0', {})
special = re.compile('<name>([^<]+)</name><link>([^<]+)</link><thumbnail>([^<]+)</thumbnail><mode>([^<]+)</mode>').findall(result)
for name, url, image, action in special:
addDirectoryItem(name, url, action, image, image, fanart, '0', '0', {})
meta = False
try: content = re.findall('<meta>(.+?)</meta>', result)[0]
except: content = '0'
try: tvshow = re.findall('<tvshow>(.+?)</tvshow>', result)[0]
except: tvshow = '0'
if content in ['seasons', 'episodes'] and tvshow == '0':
content = '0'
if content in ['movies', 'tvshows'] and control.setting('meta') == 'true':
try:
from metahandler import metahandlers
metaget = metahandlers.MetaData(preparezip=False)
meta = True
except:
meta = False
elif content in ['seasons', 'episodes']:
try:
from metahandler import metahandlers
metaget = metahandlers.MetaData(preparezip=False)
#tvd = metaget.get_meta('tvshow', tvshow)
tvd = cachemeta.get(metaget.get_meta, 24, 'tvshow', tvshow, '', '', '')
except:
tvd = {}
dirs = re.compile('<dir>(.+?)</dir>').findall(result)
totalItems = len(dirs)
for dir in dirs:
try:
data = {}
name = re.findall('<name>(.+?)</name>', dir)[0]
url = re.findall('<link>(.+?)</link>', dir)[0]
try: image = re.findall('<thumbnail>(.+?)</thumbnail>', dir)[0]
except: image = '0'
try: fanart2 = re.findall('<fanart>(.+?)</fanart>', dir)[0]
except: fanart2 = fanart
if meta == True and content =='tvshows':
try:
title = cleantitle(name).encode('utf-8')
data = {'title': title, 'tvshowtitle': title}
#data = metaget.get_meta('tvshow', title)
data = cachemeta.get(metaget.get_meta, 24, 'tvshow', title, '', '', '')
metafanart = data['backdrop_url']
if not metafanart == '': fanart2 = metafanart
except:
pass
elif content =='tvshows':
try:
title = cleantitle(name).encode('utf-8')
data = {'title': title, 'tvshowtitle': title}
except:
pass
elif content =='seasons':
try:
title = cleantitle(tvshow).encode('utf-8')
data = {'title': title, 'tvshowtitle': title}
data.update(tvd)
metafanart = tvd['backdrop_url']
if not metafanart == '': fanart2 = metafanart
except:
pass
addDirectoryItem(name, url, 'ndmode', image, image, fanart2, '0', content, data, totalItems=totalItems)
except:
pass
items = re.compile('<item>(.+?)</item>').findall(result)
try: sort = re.findall('<sort>(.+?)</sort>', result)[0]
except: sort = ''
if sort == 'yes': items = sorted(items)
totalItems = len(items)
for item in items:
try:
data = {}
name = re.findall('<title>(.+?)</title>', item)[0]
url = re.findall('<link>(.+?)</link>', item)[0]
try: image = image2 = re.findall('<thumbnail>(.+?)</thumbnail>', item)[0]
except: image = image2 = '0'
try: fanart2 = re.findall('<fanart>(.+?)</fanart>', item)[0]
except: fanart2 = fanart
if meta == True and content == 'movies':
try:
title = cleantitle(name).encode('utf-8')
data = {'title': title}
title, year = re.compile('(.+?)[(](\d{4})[)]').findall(name)[0]
title = cleantitle(title).encode('utf-8')
data = {'title': title, 'year': year}
#data = metaget.get_meta('movie', title, year=year)
data = cachemeta.get(metaget.get_meta, 24, 'movie', title, '', '', year)
metaimage = data['cover_url']
if not metaimage == '': image = metaimage
metafanart = data['backdrop_url']
if not metafanart == '': fanart2 = metafanart
except:
pass
elif content =='movies':
try:
title = cleantitle(name).encode('utf-8')
data = {'title': title}
title, year = re.compile('(.+?)[(](\d{4})[)]').findall(name)[0]
title = cleantitle(title).encode('utf-8')
data = {'title': title, 'year': year}
except:
pass
elif content == 'episodes':
try:
title = cleantitle(name).encode('utf-8')
data = {'title': title, 'tvshowtitle': tvshow}
except:
pass
try:
i = cleaneptitle(tvshow, title)
title, season, episode = i[0].encode('utf-8'), i[1], i[2]
data = {'title': title, 'tvshowtitle': tvshow, 'season': season, 'episode': episode}
except:
pass
try:
data.update({'year': tvd['year'], 'imdb_id' : tvd['imdb_id'], 'tvdb_id' : tvd['tvdb_id'], 'tvshowtitle': tvd['TVShowTitle'], 'genre' : tvd['genre'], 'studio': tvd['studio'], 'status': tvd['status'], 'duration' : tvd['duration'], 'rating': tvd['rating'], 'mpaa' : tvd['mpaa'], 'plot': tvd['plot'], 'cast': tvd['cast']})
metafanart = tvd['backdrop_url']
if not metafanart == '': image = fanart2 = metafanart
except:
pass
if 'sublink' in url:
addDirectoryItem(name, url, 'subDirectory', image, image2, fanart2, '0', content, data, tvshow=tvshow, totalItems=totalItems, isFolder=True)
else:
addDirectoryItem(name, url, 'resolveUrl', image, image2, fanart2, '0', content, data, totalItems=totalItems, isFolder=False)
except:
pass
endDirectory(content, close)
def subDirectory(name, url, audio, image, fanart, playable, tvshow, content):
    """Expand a multi-link entry into one playable row per <sublink>.

    The incoming `url` is an XML-ish blob containing one or more
    <sublink>...</sublink> elements; each becomes a 'resolveUrl' item
    labelled "<name> <Link-word> <n>".  Movie/episode metadata is looked
    up best-effort (every lookup is wrapped in a swallow-all try) so a
    metadata failure never blocks playback.
    """
    match = re.compile('<sublink>(.+?)</sublink>').findall(url)
    # Nothing to expand: bail out without rendering a directory.
    if len(match) == 0: return
    # Baseline metadata: just the cleaned title.
    try:
        title = cleantitle(name).encode('utf-8')
        data = {'title': title}
    except:
        pass
    # Movie branch: parse "Title (YYYY)" and fetch cached movie metadata.
    try:
        if not content == 'movies': raise Exception()
        title = cleantitle(name).encode('utf-8')
        data = {'title': title}
        title, year = re.compile('(.+?)[(](\d{4})[)]').findall(name)[0]
        title = cleantitle(title).encode('utf-8')
        data = {'title': title, 'year': year}
        from metahandler import metahandlers
        metaget = metahandlers.MetaData(preparezip=False)
        #data = metaget.get_meta('movie', title, year=year)
        data = cachemeta.get(metaget.get_meta, 24, 'movie', title, '', '', year)
        metaimage = data['cover_url']
        if not metaimage == '': image = metaimage
        metafanart = data['backdrop_url']
        if not metafanart == '': fanart = metafanart
    except:
        pass
    # TV branch.  NOTE(review): the guard uses `and`, so a non-'0' tvshow
    # with content != 'episodes' still enters this branch — confirm that
    # is intentional.
    try:
        if tvshow == '0' and not content == 'episodes': raise Exception()
        try:
            title = cleantitle(name).encode('utf-8')
            data = {'title': title, 'tvshowtitle': tvshow}
        except:
            pass
        try:
            i = cleaneptitle(tvshow, title)
            title, season, episode = i[0].encode('utf-8'), i[1], i[2]
            data = {'title': title, 'tvshowtitle': tvshow, 'season': season, 'episode': episode}
        except:
            pass
        from metahandler import metahandlers
        metaget = metahandlers.MetaData(preparezip=False)
        # NOTE(review): the uncached get_meta result below is immediately
        # overwritten by the cached lookup — the first call looks redundant.
        tvd = metaget.get_meta('tvshow', tvshow)
        tvd = cachemeta.get(metaget.get_meta, 24, 'tvshow', tvshow, '', '', '')
        data.update({'year': tvd['year'], 'imdb_id' : tvd['imdb_id'], 'tvdb_id' : tvd['tvdb_id'], 'tvshowtitle': tvd['TVShowTitle'], 'genre' : tvd['genre'], 'studio': tvd['studio'], 'status': tvd['status'], 'duration' : tvd['duration'], 'rating': tvd['rating'], 'mpaa' : tvd['mpaa'], 'plot': tvd['plot'], 'cast': tvd['cast']})
        metafanart = tvd['backdrop_url']
        if not metafanart == '': image = fanart = metafanart
    except:
        pass
    # One list row per alternative link; 30704 is the localized "Link" word.
    for i in range(0, len(match)):
        url = match[i]
        label = '%s %s %s' % (name, control.lang(30704).encode('utf-8'), str(i+1))
        addDirectoryItem(label, url, 'resolveUrl', image, image, fanart, '0', content, data, isFolder=False)
    control.directory(int(sys.argv[1]), cacheToDisc=True)
def getSearch():
    """Render the search menu: a new-search entry, a clear-history entry,
    and one entry for every previously cached search term."""
    new_search_label = '%s...' % control.lang(30702).encode('utf-8')
    addDirectoryItem(new_search_label, '0', 'searchDirectory', '0', '0', '0', '0', '0', {})
    addDirectoryItem(control.lang(30703).encode('utf-8'), '0', 'clearSearch', '0', '0', '0', '0', '0', {})
    try:
        # Read-only fetch of the cached history; the cache keys on this
        # local function's name, so it must stay 'search' to match the
        # writer in searchDirectory.
        def search():
            return
        history = cache.get(search, 600000000, table='rel_srch')
        for term in history:
            try:
                addDirectoryItem('%s...' % term, term, 'searchDirectory2', '0', '0', '0', '0', '0', {})
            except:
                pass
    except:
        pass
    control.directory(int(sys.argv[1]), cacheToDisc=True)
def searchDirectory(query=None):
    """Prompt for (or accept) a search term, fan out the query to every
    known content server in parallel, and list matching dirs/items.

    Results accumulate in the module-global `global_search` list, which
    worker threads append to; the list is drained between the two
    fan-out phases (server indexes, then individual link pages).
    """
    # Ask the user when no query was passed in.
    if (query == None or query == ''):
        keyboard = control.keyboard('', control.lang(30702).encode('utf-8'))
        keyboard.doModal()
        if not (keyboard.isConfirmed()): return
        query = keyboard.getText()
    if (query == None or query == ''): return
    # Persist the term into the search-history cache, then rewrite the
    # history with duplicates removed (order-preserving de-dup).
    def search(): return [query]
    result = cache.get(search, 600000000, table='rel_srch')
    def search(): return [x for y,x in enumerate((result + [query])) if x not in (result + [query])[:y]]
    result = cache.get(search, 0, table='rel_srch')
    # Shared accumulator for the worker threads below.
    global global_search ; global_search = []
    def worker(url): global_search.append(str(client.request(url)))
    # Phase 1: discover server search endpoints from the master list.
    servers = client.request(phLink)
    servers = str(servers).replace('\n','')
    servers = re.findall('</name><link>(.+?)</link>', servers)
    servers = [urlparse.urlparse(i).netloc for i in servers]
    servers = [phSearch % i for i in servers if not 'mecca' in i]
    threads = []
    for server in servers: threads.append(workers.Thread(worker, server))
    [i.start() for i in threads]
    [i.join() for i in threads]
    # Phase 2: fetch up to 30 result links from each server response.
    urls = global_search ; global_search = []
    urls = [str(i).replace('\n','') for i in urls]
    urls = [re.findall('<link>(.+?)</link>', i)[:30] for i in urls]
    urls = sum(urls, [])
    threads = []
    for url in urls: threads.append(workers.Thread(worker, url))
    [i.start() for i in threads]
    [i.join() for i in threads]
    links = global_search ; global_search = []
    # Parse each fetched page and keep only entries whose cleaned name
    # contains the query (case-insensitive).
    for link in links:
        try:
            link = str(link).replace('\r','').replace('\n','').replace('\t','').replace(' ','')
            try: fanart = re.findall('<fanart>(.+?)</fanart>', link)[0]
            except: fanart = '0'
            try: vip = re.findall('<poster>(.+?)</poster>', link)[0]
            except: vip = ''
            if vip == 'Team Phoenix': vip = ''
            try: content = re.findall('<meta>(.+?)</meta>', link)[0]
            except: content = '0'
            try: tvshow = re.findall('<tvshow>(.+?)</tvshow>', link)[0]
            except: tvshow = '0'
            if content in ['seasons', 'episodes'] and tvshow == '0':
                content = '0'
            # Folder-style results.
            dirs = re.compile('<dir>(.+?)</dir>').findall(link)
            for dir in dirs:
                try:
                    data = {}
                    name = re.findall('<name>(.+?)</name>', dir)[0]
                    name = cleantitle(name)
                    if not query.lower() in name.lower() : raise Exception()
                    url = re.findall('<link>(.+?)</link>', dir)[0]
                    try: image = re.findall('<thumbnail>(.+?)</thumbnail>', dir)[0]
                    except: image = '0'
                    try: fanart2 = re.findall('<fanart>(.+?)</fanart>', dir)[0]
                    except: fanart2 = fanart
                    if content =='tvshows':
                        try:
                            title = cleantitle(name).encode('utf-8')
                            data = {'title': title, 'tvshowtitle': title}
                        except:
                            pass
                    # Tag entries contributed by a named (VIP) source.
                    if re.search('[a-zA-Z]', vip): name += ' [COLOR orange]%s[/COLOR]' % vip
                    addDirectoryItem(name, url, 'ndmode', image, image, fanart2, '0', content, data)
                except:
                    pass
            # Playable-item results.
            items = re.compile('<item>(.+?)</item>').findall(link)
            for item in items:
                try:
                    data = {}
                    name = re.findall('<title>(.+?)</title>', item)[0]
                    name = cleantitle(name)
                    if not query.lower() in name.lower() : raise Exception()
                    url = re.findall('<link>(.+?)</link>', item)[0]
                    try: image = re.findall('<thumbnail>(.+?)</thumbnail>', item)[0]
                    except: image = '0'
                    try: fanart2 = re.findall('<fanart>(.+?)</fanart>', item)[0]
                    except: fanart2 = fanart
                    if content =='movies':
                        try:
                            title = cleantitle(name).encode('utf-8')
                            data = {'title': title}
                            title, year = re.compile('(.+?)[(](\d{4})[)]').findall(name)[0]
                            title = cleantitle(title).encode('utf-8')
                            data = {'title': title, 'year': year}
                        except:
                            pass
                    if re.search('[a-zA-Z]', vip): name += ' [COLOR orange]%s[/COLOR]' % vip
                    # Multi-link entries go through subDirectory; single
                    # links resolve directly.
                    if 'sublink' in url:
                        addDirectoryItem(name, url, 'subDirectory', image, image, fanart2, '0', content, data, isFolder=True)
                    else:
                        addDirectoryItem(name, url, 'resolveUrl', image, image, fanart2, '0', content, data, isFolder=False)
                except:
                    pass
        except:
            pass
    control.directory(int(sys.argv[1]), cacheToDisc=True)
def clearSearch():
    """Wipe the cached search history and redraw the current directory."""
    cache.clear('rel_srch')
    control.refresh()
def resolveUrl(name, url, audio, image, fanart, playable, content):
    """Resolve an entry's URL to a playable stream and start playback.

    Handles three shapes of input: .f4m manifests (played through the
    f4m proxy), '<source>'-tagged URLs (resolved by the matching plugin
    in resources.lib.sources), and plain host URLs (resolved by
    resources.lib.resolvers).  When a resolver returns several quality
    variants, the user picks one from a select dialog.
    """
    try:
        # f4m manifests bypass the resolver chain entirely.
        if '.f4m'in url:
            label = cleantitle(name)
            ext = url.split('?')[0].split('&')[0].split('|')[0].rsplit('.')[-1].replace('/', '').lower()
            if not ext == 'f4m': raise Exception()
            from resources.lib.libraries.f4mproxy.F4mProxy import f4mProxyHelper
            return f4mProxyHelper().playF4mLink(url, label, None, None,'',image)
        #legacy issue, will be removed later
        if 'afdah.org' in url and not '</source>' in url: url += '<source>afdah</source>'
        if '</source>' in url:
            # Split the payload URL from its <source> tag and import the
            # matching source module (_mv / _tv / _mv_tv variants).
            source = re.compile('<source>(.+?)</source>').findall(url)[0]
            url = re.compile('(.+?)<source>').findall(url)[0]
            for i in ['_mv', '_tv', '_mv_tv']:
                try: call = __import__('resources.lib.sources.%s%s' % (source, i), globals(), locals(), ['object'], -1).source()
                except: pass
            from resources.lib import sources ; d = sources.sources()
            url = call.get_sources(url, d.hosthdfullDict, d.hostsdfullDict, d.hostlocDict)
            if type(url) == list and len(url) == 1:
                url = url[0]['url']
            elif type(url) == list:
                # Multiple variants: normalize HD/SD labels and let the
                # user choose.
                url = sorted(url, key=lambda k: k['quality'])
                for i in url: i.update((k, '720p') for k, v in i.iteritems() if v == 'HD')
                for i in url: i.update((k, '480p') for k, v in i.iteritems() if v == 'SD')
                q = ['[B]%s[/B] | %s' % (i['source'].upper(), i['quality'].upper()) for i in url]
                u = [i['url'] for i in url]
                select = control.selectDialog(q)
                if select == -1: return
                url = u[select]
            url = call.resolve(url)
        # Generic host resolver pass (always runs, even after a source
        # resolve above).
        from resources.lib import resolvers
        host = (urlparse.urlparse(url).netloc).rsplit('.', 1)[0].rsplit('.')[-1]
        url = resolvers.request(url)
        if type(url) == list and len(url) == 1:
            url = url[0]['url']
        elif type(url) == list:
            url = sorted(url, key=lambda k: k['quality'])
            for i in url: i.update((k, '720p') for k, v in i.iteritems() if v == 'HD')
            for i in url: i.update((k, '480p') for k, v in i.iteritems() if v == 'SD')
            q = ['[B]%s[/B] | %s' % (host.upper(), i['quality'].upper()) for i in url]
            u = [i['url'] for i in url]
            select = control.selectDialog(q)
            if select == -1: return
            url = u[select]
        if url == None: raise Exception()
    except:
        # Any failure surfaces as a single "could not resolve" toast.
        return control.infoDialog(control.lang(30705).encode('utf-8'))
        pass  # NOTE(review): unreachable after the return above.
    if playable == 'true':
        # Kodi marked this item IsPlayable: hand the stream back to it.
        item = control.item(path=url)
        return control.resolve(int(sys.argv[1]), True, item)
    else:
        # Otherwise start playback directly through the player.
        label = cleantitle(name)
        item = control.item(path=url, iconImage=image, thumbnailImage=image)
        item.setInfo( type='Video', infoLabels = {'title': label} )
        control.playlist.clear()
        control.player.play(url, item)
def addCategoryItem(name, action, image, isFolder=True):
    """Add one top-level category entry that routes back into this addon
    with the given action; artwork is bundled with the addon."""
    plugin_url = '%s?action=%s' % (sys.argv[0], str(action))
    icon_path = control.addonInfo('path') + '/resources/media/phstreams/' + image
    list_item = control.item(name, iconImage=icon_path, thumbnailImage=icon_path)
    list_item.addContextMenuItems([], replaceItems=False)
    list_item.setProperty('Fanart_Image', control.addonInfo('fanart'))
    control.addItem(handle=int(sys.argv[1]), url=plugin_url, listitem=list_item, isFolder=isFolder)
def addDirectoryItem(name, url, action, image, image2, fanart, audio, content, data, tvshow='0', totalItems=0, isFolder=True):
    """Build one Kodi list item: callback URL, artwork, context menu,
    and video info labels, then add it to the current directory.

    `content` ('movies', 'tvshows', 'seasons', 'episodes' or '0') drives
    which context-menu entries and playable flags are attached; `data`
    is the infoLabels dict and is mutated in place (trailer/plot keys).
    """
    # Fall back to addon artwork when no http(s) art was supplied.
    if not str(image).lower().startswith('http'): image = control.addonInfo('icon')
    if not str(image2).lower().startswith('http'): image2 = control.addonInfo('icon')
    if not str(fanart).lower().startswith('http'): fanart = control.addonInfo('fanart')
    # Leaf content types are directly playable.
    if content in ['movies', 'episodes']: playable = 'true'
    else: playable = 'false'
    sysaddon = sys.argv[0]
    # Everything the callback handler needs is round-tripped via the URL.
    u = '%s?name=%s&url=%s&audio=%s&image=%s&fanart=%s&playable=%s&tvshow=%s&content=%s&action=%s' % (sysaddon, urllib.quote_plus(name), urllib.quote_plus(url), urllib.quote_plus(audio), urllib.quote_plus(image), urllib.quote_plus(fanart), urllib.quote_plus(playable), str(tvshow), str(content), str(action))
    cm = []
    if content in ['movies', 'tvshows']:
        data.update({'trailer': '%s?action=trailer&name=%s' % (sysaddon, urllib.quote_plus(name))})
        cm.append((control.lang(30707).encode('utf-8'), 'RunPlugin(%s?action=trailer&name=%s)' % (sysaddon, urllib.quote_plus(name))))
    if not 'plot' in data:
        data.update({'plot': control.lang(30706).encode('utf-8')})
    # "Information" context entry, localized per content type.
    if content == 'movies':
        cm.append((control.lang(30708).encode('utf-8'), 'XBMC.Action(Info)'))
    elif content in ['tvshows', 'seasons']:
        cm.append((control.lang(30709).encode('utf-8'), 'XBMC.Action(Info)'))
    elif content == 'episodes':
        cm.append((control.lang(30710).encode('utf-8'), 'XBMC.Action(Info)'))
    # "Download" context entry for playable leaves; the file name prefers
    # structured metadata and falls back to the raw display name.
    if content == 'movies' and not isFolder == True:
        downloadFile = name
        try: downloadFile = '%s (%s)' % (data['title'], data['year'])
        except: pass
        cm.append((control.lang(30722).encode('utf-8'), 'RunPlugin(%s?action=addDownload&name=%s&url=%s&image=%s)' % (sysaddon, urllib.quote_plus(downloadFile), urllib.quote_plus(url), urllib.quote_plus(image))))
    elif content == 'episodes' and not isFolder == True:
        downloadFile = name
        try: downloadFile = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))
        except: pass
        cm.append((control.lang(30722).encode('utf-8'), 'RunPlugin(%s?action=addDownload&name=%s&url=%s&image=%s)' % (sysaddon, urllib.quote_plus(downloadFile), urllib.quote_plus(url), urllib.quote_plus(image))))
    # "Set default view" context entry per content type.
    if content == 'movies':
        cm.append((control.lang(30711).encode('utf-8'), 'RunPlugin(%s?action=addView&content=movies)' % sysaddon))
    elif content == 'tvshows':
        cm.append((control.lang(30712).encode('utf-8'), 'RunPlugin(%s?action=addView&content=tvshows)' % sysaddon))
    elif content == 'seasons':
        cm.append((control.lang(30713).encode('utf-8'), 'RunPlugin(%s?action=addView&content=seasons)' % sysaddon))
    elif content == 'episodes':
        cm.append((control.lang(30714).encode('utf-8'), 'RunPlugin(%s?action=addView&content=episodes)' % sysaddon))
    item = control.item(name, iconImage='DefaultFolder.png', thumbnailImage=image)
    # setArt is unavailable on older Kodi versions; ignore failures.
    try: item.setArt({'poster': image2, 'tvshow.poster': image2, 'season.poster': image2, 'banner': image, 'tvshow.banner': image, 'season.banner': image})
    except: pass
    item.addContextMenuItems(cm, replaceItems=False)
    item.setProperty('Fanart_Image', fanart)
    if playable == 'true': item.setProperty('IsPlayable', 'true')
    item.setInfo(type='Video', infoLabels=data)
    control.addItem(handle=int(sys.argv[1]),url=u,listitem=item,totalItems=totalItems,isFolder=isFolder)
def endCategory():
    """Finish a category listing; force thumbnail view (500) on Confluence."""
    if control.skin == 'skin.confluence': control.execute('Container.SetViewMode(500)')
    control.directory(int(sys.argv[1]), cacheToDisc=True)
def endDirectory(content, close):
    """Finish a media listing: tag the container with its content type,
    optionally close the directory, and apply the user's saved view."""
    if content in ['movies', 'tvshows', 'seasons', 'episodes']:
        control.content(int(sys.argv[1]), content)
    if close == True: control.directory(int(sys.argv[1]), cacheToDisc=True)
    # View can only be applied once the directory has been finalized.
    if close == True and content in ['movies', 'tvshows', 'seasons', 'episodes']:
        views.setView(content)
def cleantitle(name):
    """Strip listing decorations from a display name.

    Removes "Link N" suffixes, trailing "(YYYY ... YYYY)" year ranges and
    Kodi [COLOR] markup, then collapses runs of whitespace and trims the
    result.  Equivalent to chaining the original re.sub calls in order.
    """
    strip_patterns = (
        r'(\.|\_|\(|\[|\s)(Link \d*|link \d*)(\.|\_|\)|\]|$)',
        r'\(\d{4}.+?\d{4}\)$',
        r'\s\[COLOR.+?\].+?\[/COLOR\]|\[/COLOR\]\[COLOR.+?\]\s.+?\[/COLOR\]|\[COLOR.+?\]|\[/COLOR\]',
    )
    for pattern in strip_patterns:
        name = re.sub(pattern, '', name)
    name = re.sub(r'\s\s+', ' ', name)
    return name.strip()
def cleaneptitle(tvshow, name):
    """Split an episode label into (title, season, episode).

    Recognises tags of the form S01E02 / s01e02 / "Season 1 Episode 2" /
    "1x Episode 2" / 1x02.  The show name and the tag are stripped from
    the label; if nothing readable remains, the title is rebuilt as
    "<tvshow> SxxEyy".  Returns None when no episode tag is found.
    """
    try:
        tag_patterns = (
            r'(S\d*E\d*)',
            r'(s\d*e\d*)',
            r'(Season \d* Episode \d*)',
            r'(\d*x Episode \d*)',
            r'(\d*x\d*)',
        )
        tags = []
        for pattern in tag_patterns:
            tags += re.compile(pattern).findall(name)
        tag = tags[0]
        # Remove the show name and the episode tag from the label.
        name = name.replace(tvshow, '').replace(tag, '')
        name = re.sub(r'-|:', '', name)
        name = re.sub(r'\s\s+', ' ', name)
        name = name.strip()
        # First run of digits in the tag is the season, last the episode.
        digits = [d for d in re.compile(r'(\d*)').findall(tag) if d.isdigit()]
        season = '%01d' % int(digits[0])
        episode = '%01d' % int(digits[-1])
        # No readable title left: synthesize a canonical one.
        if re.match(r'[A-Z0-9]', name) is None:
            name = '%s S%02dE%02d' % (tvshow, int(season), int(episode))
        return (name, season, episode)
    except:
        return
| apache-2.0 |
clembou/PCWG | tests/interpolators_test.py | 1 | 4393 | import pcwg.core.interpolators as interpolators
import unittest
from pcwg.core.binning import Bins
class TestMarmanderPowerCurveInterpolator(unittest.TestCase):
    """Benchmark MarmanderPowerCurveInterpolator against reference values
    computed in the original Excel spreadsheet implementation.

    All `print` statements use single-argument call syntax so the module
    parses under both Python 2 and Python 3 with identical output.
    """

    def test_spreadsheet_benchmark(self):
        """Interpolated power at each benchmark speed must match the
        spreadsheet value within a per-speed relative tolerance."""
        # Input power curve: wind-speed bin centres (m/s)...
        x = [1.00,
             2.00,
             3.00,
             4.10,
             5.06,
             6.04,
             7.00,
             8.00,
             9.01,
             9.98,
             10.97,
             12.00,
             12.99,
             13.95,
             14.99,
             16.01,
             16.98,
             17.84,
             19.00,
             20.00,
             21.00,
             22.00,
             23.00,
             24.00,
             25.00,
             26.00,
             27.00,
             28.00,
             29.00,
             30.00]
        # ...and the corresponding measured power (kW).
        y = [0.0,
             0.0,
             0.0,
             70.5,
             198.6,
             373.7,
             578.4,
             886.4,
             1177.2,
             1523.2,
             1792.2,
             1918.3,
             1955.4,
             1976.0,
             1976.0,
             1981.7,
             1982.9,
             1982.2,
             1987.4,
             1987.4,
             1987.4,
             1987.4,
             1987.4,
             1987.4,
             1987.4,
             0.0,
             0.0,
             0.0,
             0.0,
             0.0]
        cutOutWindSpeed = 25.0
        # 1 m/s bins from 0 to 30 m/s.
        limits = Bins(0.0, 1.0, 30.0).limits
        # Speeds at which the spreadsheet evaluated the interpolator.
        expectedX = [1.00,
                     2.00,
                     3.00,
                     3.55,
                     4.10,
                     5.06,
                     6.04,
                     7.00,
                     8.00,
                     9.01,
                     9.98,
                     10.97,
                     12.00,
                     12.99,
                     13.95,
                     14.99,
                     16.01,
                     16.98,
                     17.84,
                     18.42,
                     19.00,
                     20.00,
                     21.00,
                     22.00,
                     23.00,
                     24.00,
                     25.00,
                     25.01,
                     26.00,
                     27.00]
        # Spreadsheet reference outputs at those speeds.
        expectedY = [0.0,
                     0.0,
                     0.0,
                     0.0,
                     91.8,
                     204.2,
                     383.9,
                     571.1,
                     893.5,
                     1173.5,
                     1522.4,
                     1794.9,
                     1922.8,
                     1954.5,
                     1977.4,
                     1975.1,
                     1982.2,
                     1983.4,
                     1979.4,
                     1987.4,
                     1987.4,
                     1987.4,
                     1987.4,
                     1987.4,
                     1987.4,
                     1987.4,
                     1987.4,
                     0.0,
                     0.0,
                     0.0
                     ]
        interpolator = interpolators.MarmanderPowerCurveInterpolator(x, y, cutOutWindSpeed, xLimits = limits, debug = False)
        if interpolator.debug:
            print(interpolator.debugText)
        print("Cen\tExpect\tAct\tError\tTolerance\tMatch")
        # NOTE: a relatively large tolerance is required to make this pass.
        # This is understood to be associated with differences between the
        # cubic interpolation scheme implemented in the Excel benchmark
        # and scipy.
        # TODO: Further work to bottom out on this difference.
        for i in range(len(expectedX)):
            # Looser tolerance below 6 m/s where absolute power is small.
            if expectedX[i] < 6.0:
                tolerancePercent = 0.02
            else:
                tolerancePercent = 0.005
            actual = interpolator(expectedX[i])
            error = actual - expectedY[i]
            if expectedY[i] != 0.0:
                errorPercent = (actual - expectedY[i]) / expectedY[i]
            else:
                errorPercent = 0.0
            match = (abs(errorPercent) <= tolerancePercent)
            print("{0:.2f}\t{1:.2f}\t{2:.2f}\t{3:.2f}%\t{4:.2f}%\t{5}".format(expectedX[i], expectedY[i], actual, (errorPercent * 100.0), (tolerancePercent * 100.0), match))
            self.assertTrue(match)
# Allow the benchmark to be run directly as a script.
if __name__ == '__main__':
    unittest.main()
| mit |
bratsche/Neutron-Drive | google_appengine/lib/django_1_2/django/contrib/gis/geos/collections.py | 311 | 4663 | """
This module houses the Geometry Collection objects:
GeometryCollection, MultiPoint, MultiLineString, and MultiPolygon
"""
from ctypes import c_int, c_uint, byref
from django.contrib.gis.geos.error import GEOSException, GEOSIndexError
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos.libgeos import get_pointer_arr, GEOM_PTR, GEOS_PREPARE
from django.contrib.gis.geos.linestring import LineString, LinearRing
from django.contrib.gis.geos.point import Point
from django.contrib.gis.geos.polygon import Polygon
from django.contrib.gis.geos import prototypes as capi
class GeometryCollection(GEOSGeometry):
    # GEOS geometry type id for GEOMETRYCOLLECTION.
    _typeid = 7

    def __init__(self, *args, **kwargs):
        "Initializes a Geometry Collection from a sequence of Geometry objects."
        # Checking the arguments
        if not args:
            raise TypeError('Must provide at least one Geometry to initialize %s.' % self.__class__.__name__)
        if len(args) == 1:
            # If only one geometry provided or a list of geometries is provided
            # in the first argument.
            if isinstance(args[0], (tuple, list)):
                init_geoms = args[0]
            else:
                init_geoms = args
        else:
            init_geoms = args
        # Ensuring that only the permitted geometries are allowed in this collection
        # this is moved to list mixin super class
        self._check_allowed(init_geoms)
        # Creating the geometry pointer array.
        collection = self._create_collection(len(init_geoms), iter(init_geoms))
        super(GeometryCollection, self).__init__(collection, **kwargs)

    def __iter__(self):
        "Iterates over each Geometry in the Collection."
        for i in xrange(len(self)):
            yield self[i]

    def __len__(self):
        "Returns the number of geometries in this Collection."
        return self.num_geom

    ### Methods for compatibility with ListMixin ###
    def _create_collection(self, length, items):
        # Creating the geometry pointer array.
        geoms = get_pointer_arr(length)
        for i, g in enumerate(items):
            # this is a little sloppy, but makes life easier
            # allow GEOSGeometry types (python wrappers) or pointer types
            # (cloned so the collection owns its own copies).
            geoms[i] = capi.geom_clone(getattr(g, 'ptr', g))
        return capi.create_collection(c_int(self._typeid), byref(geoms), c_uint(length))

    def _get_single_internal(self, index):
        # Raw GEOS pointer to the index-th sub-geometry (not cloned).
        return capi.get_geomn(self.ptr, index)

    def _get_single_external(self, index):
        "Returns the Geometry from this Collection at the given index (0-based)."
        # Checking the index and returning the corresponding GEOS geometry.
        return GEOSGeometry(capi.geom_clone(self._get_single_internal(index)), srid=self.srid)

    def _set_list(self, length, items):
        "Create a new collection, and destroy the contents of the previous pointer."
        prev_ptr = self.ptr
        srid = self.srid
        self.ptr = self._create_collection(length, items)
        # Re-apply the SRID, which is not carried by the new GEOS pointer.
        if srid: self.srid = srid
        capi.destroy_geom(prev_ptr)

    # Element assignment rebuilds the whole collection (GEOS collections
    # are immutable at the C level).
    _set_single = GEOSGeometry._set_single_rebuild
    _assign_extended_slice = GEOSGeometry._assign_extended_slice_rebuild

    @property
    def kml(self):
        "Returns the KML for this Geometry Collection."
        return '<MultiGeometry>%s</MultiGeometry>' % ''.join([g.kml for g in self])

    @property
    def tuple(self):
        "Returns a tuple of all the coordinates in this Geometry Collection"
        return tuple([g.tuple for g in self])
    coords = tuple
# MultiPoint, MultiLineString, and MultiPolygon class definitions.
class MultiPoint(GeometryCollection):
    """A collection restricted to Point members (GEOS MULTIPOINT)."""
    _allowed = Point
    _typeid = 4
class MultiLineString(GeometryCollection):
    """A collection restricted to line members (GEOS MULTILINESTRING)."""
    _allowed = (LineString, LinearRing)
    _typeid = 5

    @property
    def merged(self):
        """
        Returns a LineString representing the line merge of this
        MultiLineString.
        """
        return self._topology(capi.geos_linemerge(self.ptr))
class MultiPolygon(GeometryCollection):
    """A collection restricted to Polygon members (GEOS MULTIPOLYGON)."""
    _allowed = Polygon
    _typeid = 6

    @property
    def cascaded_union(self):
        "Returns a cascaded union of this MultiPolygon."
        if GEOS_PREPARE:
            return GEOSGeometry(capi.geos_cascaded_union(self.ptr), self.srid)
        else:
            raise GEOSException('The cascaded union operation requires GEOS 3.1+.')
# Setting the allowed types here since GeometryCollection is defined before
# its subclasses (a generic collection may contain any geometry type,
# including the multi-types themselves).
GeometryCollection._allowed = (Point, LineString, LinearRing, Polygon, MultiPoint, MultiLineString, MultiPolygon)
| bsd-3-clause |
TNT-Samuel/Coding-Projects | DNS Server/Source - Copy/Lib/site-packages/urllib3/response.py | 24 | 24667 | from __future__ import absolute_import
from contextlib import contextmanager
import zlib
import io
import logging
from socket import timeout as SocketTimeout
from socket import error as SocketError
from ._collections import HTTPHeaderDict
from .exceptions import (
BodyNotHttplibCompatible, ProtocolError, DecodeError, ReadTimeoutError,
ResponseNotChunked, IncompleteRead, InvalidHeader
)
from .packages.six import string_types as basestring, binary_type, PY3
from .packages.six.moves import http_client as httplib
from .connection import HTTPException, BaseSSLError
from .util.response import is_fp_closed, is_response_to_head
log = logging.getLogger(__name__)
class DeflateDecoder(object):
    """Incremental decoder for 'deflate' content-encoding.

    Servers disagree on whether 'deflate' means a zlib-wrapped stream
    (RFC 1950) or a raw DEFLATE stream (RFC 1951).  The first feed of
    data is tried as zlib-wrapped; if zlib rejects it, the buffered
    input is replayed through a raw-DEFLATE decompressor (negative
    wbits).  Uses the builtin ``bytes`` directly, which is identical to
    six's ``binary_type`` on both Python 2 (str) and Python 3.
    """

    def __init__(self):
        self._first_try = True   # still deciding zlib-wrapped vs raw
        self._data = bytes()     # input buffered until the format is known
        self._obj = zlib.decompressobj()

    def __getattr__(self, name):
        # Delegate everything else (flush, unused_data, ...) to zlib.
        return getattr(self._obj, name)

    def decompress(self, data):
        """Decompress ``data`` incrementally, returning any output bytes."""
        if not data:
            return data
        if not self._first_try:
            return self._obj.decompress(data)

        # First feed: keep a copy so it can be replayed if the
        # zlib-wrapped attempt turns out to be wrong.
        self._data += data
        try:
            decompressed = self._obj.decompress(data)
            if decompressed:
                # Produced output, so the zlib-wrapped guess was right.
                self._first_try = False
                self._data = None
            return decompressed
        except zlib.error:
            # Not zlib-wrapped: switch to raw DEFLATE and replay the
            # buffered input through the new decompressor.
            self._first_try = False
            self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
            try:
                return self.decompress(self._data)
            finally:
                self._data = None
class GzipDecoderState(object):
    """States for GzipDecoder's multi-member stream handling."""

    FIRST_MEMBER = 0    # decoding the first gzip member
    OTHER_MEMBERS = 1   # at least one member finished; more data followed
    SWALLOW_DATA = 2    # hit an error after a full member; ignore the rest


class GzipDecoder(object):
    """Incremental decoder for 'gzip' content-encoding.

    Supports concatenated gzip members (each decoded with a fresh
    decompressor).  Trailing garbage after at least one complete member
    is silently swallowed, matching the behaviour of common gzip
    clients; garbage before any member completes still raises.  Uses
    the builtin ``bytes`` directly, which is identical to six's
    ``binary_type`` on both Python 2 (str) and Python 3.
    """

    def __init__(self):
        # 16 + MAX_WBITS tells zlib to expect a gzip header/trailer.
        self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
        self._state = GzipDecoderState.FIRST_MEMBER

    def __getattr__(self, name):
        # Delegate everything else (flush, unused_data, ...) to zlib.
        return getattr(self._obj, name)

    def decompress(self, data):
        """Decompress ``data`` incrementally, returning any output bytes."""
        ret = bytes()
        if self._state == GzipDecoderState.SWALLOW_DATA or not data:
            return ret
        while True:
            try:
                ret += self._obj.decompress(data)
            except zlib.error:
                previous_state = self._state
                # Ignore any data after the first decode error.
                self._state = GzipDecoderState.SWALLOW_DATA
                if previous_state == GzipDecoderState.OTHER_MEMBERS:
                    # Allow trailing garbage acceptable in other gzip clients.
                    return ret
                raise
            data = self._obj.unused_data
            if not data:
                return ret
            # Leftover input means another gzip member follows: start a
            # fresh decompressor for it.
            self._state = GzipDecoderState.OTHER_MEMBERS
            self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
def _get_decoder(mode):
    """Return the content decoder matching a content-encoding token."""
    return GzipDecoder() if mode == 'gzip' else DeflateDecoder()
class HTTPResponse(io.IOBase):
"""
HTTP Response container.
Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
loaded and decoded on-demand when the ``data`` property is accessed. This
class is also compatible with the Python standard library's :mod:`io`
module, and can hence be treated as a readable object in the context of that
framework.
Extra parameters for behaviour not present in httplib.HTTPResponse:
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param original_response:
When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
object, it's convenient to include the original for debug purposes. It's
otherwise unused.
:param retries:
The retries contains the last :class:`~urllib3.util.retry.Retry` that
was used during the request.
:param enforce_content_length:
Enforce content length checking. Body returned by server must match
value of Content-Length header, if present. Otherwise, raise error.
"""
CONTENT_DECODERS = ['gzip', 'deflate']
REDIRECT_STATUSES = [301, 302, 303, 307, 308]
def __init__(self, body='', headers=None, status=0, version=0, reason=None,
strict=0, preload_content=True, decode_content=True,
original_response=None, pool=None, connection=None, msg=None,
retries=None, enforce_content_length=False,
request_method=None, request_url=None):
if isinstance(headers, HTTPHeaderDict):
self.headers = headers
else:
self.headers = HTTPHeaderDict(headers)
self.status = status
self.version = version
self.reason = reason
self.strict = strict
self.decode_content = decode_content
self.retries = retries
self.enforce_content_length = enforce_content_length
self._decoder = None
self._body = None
self._fp = None
self._original_response = original_response
self._fp_bytes_read = 0
self.msg = msg
self._request_url = request_url
if body and isinstance(body, (basestring, binary_type)):
self._body = body
self._pool = pool
self._connection = connection
if hasattr(body, 'read'):
self._fp = body
# Are we using the chunked-style of transfer encoding?
self.chunked = False
self.chunk_left = None
tr_enc = self.headers.get('transfer-encoding', '').lower()
# Don't incur the penalty of creating a list and then discarding it
encodings = (enc.strip() for enc in tr_enc.split(","))
if "chunked" in encodings:
self.chunked = True
# Determine length of response
self.length_remaining = self._init_length(request_method)
# If requested, preload the body.
if preload_content and not self._body:
self._body = self.read(decode_content=decode_content)
def get_redirect_location(self):
"""
Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
"""
if self.status in self.REDIRECT_STATUSES:
return self.headers.get('location')
return False
def release_conn(self):
if not self._pool or not self._connection:
return
self._pool._put_conn(self._connection)
self._connection = None
@property
def data(self):
# For backwords-compat with earlier urllib3 0.4 and earlier.
if self._body:
return self._body
if self._fp:
return self.read(cache_content=True)
@property
def connection(self):
return self._connection
def isclosed(self):
return is_fp_closed(self._fp)
def tell(self):
"""
Obtain the number of bytes pulled over the wire so far. May differ from
the amount of content returned by :meth:``HTTPResponse.read`` if bytes
are encoded on the wire (e.g, compressed).
"""
return self._fp_bytes_read
def _init_length(self, request_method):
"""
Set initial length value for Response content if available.
"""
length = self.headers.get('content-length')
if length is not None:
if self.chunked:
# This Response will fail with an IncompleteRead if it can't be
# received as chunked. This method falls back to attempt reading
# the response before raising an exception.
log.warning("Received response with both Content-Length and "
"Transfer-Encoding set. This is expressly forbidden "
"by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
"attempting to process response as Transfer-Encoding: "
"chunked.")
return None
try:
# RFC 7230 section 3.3.2 specifies multiple content lengths can
# be sent in a single Content-Length header
# (e.g. Content-Length: 42, 42). This line ensures the values
# are all valid ints and that as long as the `set` length is 1,
# all values are the same. Otherwise, the header is invalid.
lengths = set([int(val) for val in length.split(',')])
if len(lengths) > 1:
raise InvalidHeader("Content-Length contained multiple "
"unmatching values (%s)" % length)
length = lengths.pop()
except ValueError:
length = None
else:
if length < 0:
length = None
# Convert status to int for comparison
# In some cases, httplib returns a status of "_UNKNOWN"
try:
status = int(self.status)
except ValueError:
status = 0
# Check for responses that shouldn't include a body
if status in (204, 304) or 100 <= status < 200 or request_method == 'HEAD':
length = 0
return length
    def _init_decoder(self):
        """
        Set-up the _decoder attribute if necessary.

        Lazily creates the content decoder so responses without a supported
        Content-Encoding pay no cost.
        """
        # Note: content-encoding value should be case-insensitive, per RFC 7230
        # Section 3.2
        content_encoding = self.headers.get('content-encoding', '').lower()
        if self._decoder is None and content_encoding in self.CONTENT_DECODERS:
            self._decoder = _get_decoder(content_encoding)
    def _decode(self, data, decode_content, flush_decoder):
        """
        Decode the data passed in and potentially flush the decoder.

        Decompression failures are re-raised as :class:`DecodeError` so
        callers never see raw zlib/IO errors.
        """
        try:
            if decode_content and self._decoder:
                data = self._decoder.decompress(data)
        except (IOError, zlib.error) as e:
            content_encoding = self.headers.get('content-encoding', '').lower()
            raise DecodeError(
                "Received response with content-encoding: %s, but "
                "failed to decode it." % content_encoding, e)
        # At end-of-body, drain anything the decoder is still buffering.
        if flush_decoder and decode_content:
            data += self._flush_decoder()
        return data
    def _flush_decoder(self):
        """
        Flushes the decoder. Should only be called if the decoder is actually
        being used.

        :returns: any remaining decoded bytes, or b'' when no decoder is set.
        """
        if self._decoder:
            # Drain buffered output with an empty decompress before flush().
            buf = self._decoder.decompress(b'')
            return buf + self._decoder.flush()
        return b''
    @contextmanager
    def _error_catcher(self):
        """
        Catch low-level python exceptions, instead re-raising urllib3
        variants, so that low-level exceptions are not leaked in the
        high-level api.
        On exit, release the connection back to the pool.
        """
        # clean_exit records whether the wrapped body raised; on a dirty exit
        # the connection is discarded instead of being returned to the pool.
        clean_exit = False
        try:
            try:
                yield
            except SocketTimeout:
                # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
                # there is yet no clean way to get at it from this context.
                raise ReadTimeoutError(self._pool, None, 'Read timed out.')
            except BaseSSLError as e:
                # FIXME: Is there a better way to differentiate between SSLErrors?
                if 'read operation timed out' not in str(e): # Defensive:
                    # This shouldn't happen but just in case we're missing an edge
                    # case, let's avoid swallowing SSL errors.
                    raise
                raise ReadTimeoutError(self._pool, None, 'Read timed out.')
            except (HTTPException, SocketError) as e:
                # This includes IncompleteRead.
                raise ProtocolError('Connection broken: %r' % e, e)
            # If no exception is thrown, we should avoid cleaning up
            # unnecessarily.
            clean_exit = True
        finally:
            # If we didn't terminate cleanly, we need to throw away our
            # connection.
            if not clean_exit:
                # The response may not be closed but we're not going to use it
                # anymore so close it now to ensure that the connection is
                # released back to the pool.
                if self._original_response:
                    self._original_response.close()
                # Closing the response may not actually be sufficient to close
                # everything, so if we have a hold of the connection close that
                # too.
                if self._connection:
                    self._connection.close()
            # If we hold the original response but it's closed now, we should
            # return the connection back to the pool.
            if self._original_response and self._original_response.isclosed():
                self.release_conn()
    def read(self, amt=None, decode_content=None, cache_content=False):
        """
        Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
        parameters: ``decode_content`` and ``cache_content``.
        :param amt:
            How much of the content to read. If specified, caching is skipped
            because it doesn't make sense to cache partial content as the full
            response.
        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
        :param cache_content:
            If True, will save the returned data such that the same result is
            returned despite of the state of the underlying file object. This
            is useful if you want the ``.data`` property to continue working
            after having ``.read()`` the file object. (Overridden if ``amt`` is
            set.)
        """
        self._init_decoder()
        if decode_content is None:
            decode_content = self.decode_content
        # Returns None (not b'') once the file object has been released.
        if self._fp is None:
            return
        flush_decoder = False
        data = None
        # _error_catcher translates socket/httplib errors to urllib3 ones and
        # releases the connection on exit.
        with self._error_catcher():
            if amt is None:
                # cStringIO doesn't like amt=None
                data = self._fp.read()
                flush_decoder = True
            else:
                cache_content = False
                data = self._fp.read(amt)
                if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
                    # Close the connection when no data is returned
                    #
                    # This is redundant to what httplib/http.client _should_
                    # already do. However, versions of python released before
                    # December 15, 2012 (http://bugs.python.org/issue16298) do
                    # not properly close the connection in all cases. There is
                    # no harm in redundantly calling close.
                    self._fp.close()
                    flush_decoder = True
                    if self.enforce_content_length and self.length_remaining not in (0, None):
                        # This is an edge case that httplib failed to cover due
                        # to concerns of backward compatibility. We're
                        # addressing it here to make sure IncompleteRead is
                        # raised during streaming, so all calls with incorrect
                        # Content-Length are caught.
                        raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
        if data:
            # Track raw byte progress before decoding for tell()/IncompleteRead.
            self._fp_bytes_read += len(data)
            if self.length_remaining is not None:
                self.length_remaining -= len(data)
            data = self._decode(data, decode_content, flush_decoder)
            if cache_content:
                self._body = data
        return data
    def stream(self, amt=2**16, decode_content=None):
        """
        A generator wrapper for the read() method. A call will block until
        ``amt`` bytes have been read from the connection or until the
        connection is closed.
        :param amt:
            How much of the content to read. The generator will return up to
            that much data per iteration, but may return less. This is
            particularly likely when using compressed data. However, the empty
            string will never be returned.
        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
        """
        # Chunked responses need the dedicated chunk reader; everything else
        # can simply loop on read() until the file object is exhausted.
        if self.chunked and self.supports_chunked_reads():
            for line in self.read_chunked(amt, decode_content=decode_content):
                yield line
        else:
            while not is_fp_closed(self._fp):
                data = self.read(amt=amt, decode_content=decode_content)
                if data:
                    yield data
    @classmethod
    def from_httplib(ResponseCls, r, **response_kw):
        """
        Given an :class:`httplib.HTTPResponse` instance ``r``, return a
        corresponding :class:`urllib3.response.HTTPResponse` object.
        Remaining parameters are passed to the HTTPResponse constructor, along
        with ``original_response=r``.
        """
        headers = r.msg
        # Normalize header storage to HTTPHeaderDict on both Python versions.
        if not isinstance(headers, HTTPHeaderDict):
            if PY3: # Python 3
                headers = HTTPHeaderDict(headers.items())
            else: # Python 2
                headers = HTTPHeaderDict.from_httplib(headers)
        # HTTPResponse objects in Python 3 don't have a .strict attribute
        strict = getattr(r, 'strict', 0)
        resp = ResponseCls(body=r,
                           headers=headers,
                           status=r.status,
                           version=r.version,
                           reason=r.reason,
                           strict=strict,
                           original_response=r,
                           **response_kw)
        return resp
# Backwards-compatibility methods for httplib.HTTPResponse
def getheaders(self):
return self.headers
def getheader(self, name, default=None):
return self.headers.get(name, default)
# Backwards compatibility for http.cookiejar
def info(self):
return self.headers
# Overrides from io.IOBase
    def close(self):
        # io.IOBase-style close: close the wrapped file object (once) and,
        # if we still hold the connection, close it too. Note this discards
        # the connection rather than returning it to the pool.
        if not self.closed:
            self._fp.close()
        if self._connection:
            self._connection.close()
    @property
    def closed(self):
        # io.IOBase protocol. A missing or un-interrogatable file object is
        # reported as closed.
        if self._fp is None:
            return True
        elif hasattr(self._fp, 'isclosed'):
            # httplib.HTTPResponse exposes isclosed() rather than .closed.
            return self._fp.isclosed()
        elif hasattr(self._fp, 'closed'):
            return self._fp.closed
        else:
            return True
    def fileno(self):
        # io.IOBase protocol: delegate to the wrapped file object when it has
        # a real descriptor; otherwise raise, matching io's behavior.
        if self._fp is None:
            raise IOError("HTTPResponse has no file to get a fileno from")
        elif hasattr(self._fp, "fileno"):
            return self._fp.fileno()
        else:
            raise IOError("The file-like object this HTTPResponse is wrapped "
                          "around has no file descriptor")
def flush(self):
if self._fp is not None and hasattr(self._fp, 'flush'):
return self._fp.flush()
    def readable(self):
        # This method is required for `io` module compatibility.
        # The response body is always nominally readable (reads may still
        # return nothing once consumed).
        return True
def readinto(self, b):
# This method is required for `io` module compatibility.
temp = self.read(len(b))
if len(temp) == 0:
return 0
else:
b[:len(temp)] = temp
return len(temp)
    def supports_chunked_reads(self):
        """
        Checks if the underlying file-like object looks like a
        httplib.HTTPResponse object. We do this by testing for the fp
        attribute. If it is present we assume it returns raw chunks as
        processed by read_chunked().
        """
        # Only httplib-style responses expose .fp (the raw buffered socket).
        return hasattr(self._fp, 'fp')
    def _update_chunk_length(self):
        # First, we'll figure out length of a chunk and then
        # we'll try to read it from socket.
        if self.chunk_left is not None:
            return
        # Chunk header looks like "<hex size>[;extensions]\r\n"; the split
        # discards any chunk extensions.
        line = self._fp.fp.readline()
        line = line.split(b';', 1)[0]
        try:
            self.chunk_left = int(line, 16)
        except ValueError:
            # Invalid chunked protocol response, abort.
            self.close()
            raise httplib.IncompleteRead(line)
    def _handle_chunk(self, amt):
        # Return up to ``amt`` bytes from the current chunk (the whole chunk
        # when amt is None), keeping self.chunk_left consistent across calls.
        returned_chunk = None
        if amt is None:
            chunk = self._fp._safe_read(self.chunk_left)
            returned_chunk = chunk
            self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
        elif amt < self.chunk_left:
            # Partial read: leave the remainder for the next call.
            value = self._fp._safe_read(amt)
            self.chunk_left = self.chunk_left - amt
            returned_chunk = value
        elif amt == self.chunk_left:
            value = self._fp._safe_read(amt)
            self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
            returned_chunk = value
        else: # amt > self.chunk_left
            # Request exceeds the chunk: return only what this chunk holds.
            returned_chunk = self._fp._safe_read(self.chunk_left)
            self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
        return returned_chunk
def read_chunked(self, amt=None, decode_content=None):
"""
Similar to :meth:`HTTPResponse.read`, but with an additional
parameter: ``decode_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
self._init_decoder()
# FIXME: Rewrite this method and make it a class with a better structured logic.
if not self.chunked:
raise ResponseNotChunked(
"Response is not chunked. "
"Header 'transfer-encoding: chunked' is missing.")
if not self.supports_chunked_reads():
raise BodyNotHttplibCompatible(
"Body should be httplib.HTTPResponse like. "
"It should have have an fp attribute which returns raw chunks.")
with self._error_catcher():
# Don't bother reading the body of a HEAD request.
if self._original_response and is_response_to_head(self._original_response):
self._original_response.close()
return
# If a response is already read and closed
# then return immediately.
if self._fp.fp is None:
return
while True:
self._update_chunk_length()
if self.chunk_left == 0:
break
chunk = self._handle_chunk(amt)
decoded = self._decode(chunk, decode_content=decode_content,
flush_decoder=False)
if decoded:
yield decoded
if decode_content:
# On CPython and PyPy, we should never need to flush the
# decoder. However, on Jython we *might* need to, so
# lets defensively do it anyway.
decoded = self._flush_decoder()
if decoded: # Platform-specific: Jython.
yield decoded
# Chunk content ends with \r\n: discard it.
while True:
line = self._fp.fp.readline()
if not line:
# Some sites may not end with '\r\n'.
break
if line == b'\r\n':
break
# We read everything; close the "file".
if self._original_response:
self._original_response.close()
    def geturl(self):
        """
        Returns the URL that was the source of this response.
        If the request that generated this response redirected, this method
        will return the final redirect location.
        """
        # The retry history records each redirect hop; its last entry holds
        # the final location. Otherwise fall back to the original request URL.
        if self.retries is not None and len(self.retries.history):
            return self.retries.history[-1].redirect_location
        else:
            return self._request_url
| gpl-3.0 |
mmatyas/servo | tests/wpt/grouping_formatter.py | 99 | 11175 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from mozlog.formatters import base
import collections
import os
import sys
import subprocess
# Fallback ANSI escape codes used when the `blessings` package is unavailable:
# cursor-up one line, and clear-to-end-of-line.
DEFAULT_MOVE_UP_CODE = u"\x1b[A"
DEFAULT_CLEAR_EOL_CODE = u"\x1b[K"
class GroupingFormatter(base.BaseFormatter):
    """Formatter designed to produce unexpected test results grouped
    together in a readable format."""
    def __init__(self):
        # Progress counters.
        self.number_of_tests = 0
        self.completed_tests = 0
        # Interactive-terminal display state: `current_display` is the status
        # area most recently printed, which is erased and redrawn on updates.
        self.need_to_erase_last_line = False
        self.current_display = ""
        self.running_tests = {}
        # Per-test captured process output, and subtest failures buffered
        # until the enclosing test_end arrives.
        self.test_output = collections.defaultdict(str)
        self.subtest_failures = collections.defaultdict(list)
        self.test_failure_text = ""
        self.tests_with_failing_subtests = []
        self.interactive = os.isatty(sys.stdout.fileno())
        # TODO(mrobinson, 8313): We need to add support for Windows terminals here.
        if self.interactive:
            self.line_width = int(subprocess.check_output(['stty', 'size']).split()[1])
            self.move_up, self.clear_eol = self.get_move_up_and_clear_eol_codes()
        # Counts of results that matched expectations, keyed by status.
        self.expected = {
            'OK': 0,
            'PASS': 0,
            'FAIL': 0,
            'ERROR': 0,
            'TIMEOUT': 0,
            'SKIP': 0,
            'CRASH': 0,
        }
        # Full result records for unexpected outcomes, keyed by status.
        self.unexpected_tests = {
            'OK': [],
            'PASS': [],
            'FAIL': [],
            'ERROR': [],
            'TIMEOUT': [],
            'CRASH': [],
        }
    def get_move_up_and_clear_eol_codes(self):
        # Prefer terminal-specific codes from `blessings`; fall back to the
        # plain ANSI defaults if it is missing or fails to initialize.
        try:
            import blessings
        except ImportError:
            return DEFAULT_MOVE_UP_CODE, DEFAULT_CLEAR_EOL_CODE
        try:
            self.terminal = blessings.Terminal()
            return self.terminal.move_up, self.terminal.clear_eol
        except Exception as exception:
            sys.stderr.write("GroupingFormatter: Could not get terminal "
                             "control characters: %s\n" % exception)
            return DEFAULT_MOVE_UP_CODE, DEFAULT_CLEAR_EOL_CODE
    def text_to_erase_display(self):
        # One move-up + clear-eol pair per line of the last status display.
        if not self.interactive or not self.current_display:
            return ""
        return ((self.move_up + self.clear_eol) *
                self.current_display.count('\n'))
    def generate_output(self, text=None, new_display=None):
        # Non-interactive mode: just emit the text, no redrawing.
        if not self.interactive:
            return text
        output = self.text_to_erase_display()
        if text:
            output += text
        if new_display is not None:
            self.current_display = new_display
        return output + self.current_display
    def build_status_line(self):
        # "[completed/total]" progress prefix plus the currently running tests.
        if self.number_of_tests == 0:
            new_display = " [%i] " % self.completed_tests
        else:
            new_display = " [%i/%i] " % (self.completed_tests, self.number_of_tests)
        if self.running_tests:
            indent = " " * len(new_display)
            if self.interactive:
                max_width = self.line_width - len(new_display)
            else:
                max_width = sys.maxsize
            return new_display + ("\n%s" % indent).join(
                val[:max_width] for val in self.running_tests.values()) + "\n"
        else:
            return new_display + "No tests running.\n"
    def suite_start(self, data):
        self.number_of_tests = len(data["tests"])
        self.start_time = data["time"]
        if self.number_of_tests == 0:
            return "Running tests in %s\n\n" % data[u'source']
        else:
            return "Running %i tests in %s\n\n" % (self.number_of_tests, data[u'source'])
    def test_start(self, data):
        # Track per-thread so test_end can remove the right entry.
        self.running_tests[data['thread']] = data['test']
        return self.generate_output(text=None,
                                    new_display=self.build_status_line())
    def wrap_and_indent_lines(self, lines, indent):
        # Draw a box-drawing gutter alongside a multi-line message.
        assert(len(lines) > 0)
        output = indent + u"\u25B6 %s\n" % lines[0]
        for line in lines[1:-1]:
            output += indent + u"\u2502 %s\n" % line
        if len(lines) > 1:
            output += indent + u"\u2514 %s\n" % lines[-1]
        return output
    def get_lines_for_unexpected_result(self,
                                        test_name,
                                        status,
                                        expected,
                                        message,
                                        stack):
        # Test names sometimes contain control characters, which we want
        # to be printed in their raw form, and not their interpreted form.
        # NOTE(review): encode() returns bytes on Python 3; this file is
        # Python 2 (see iteritems below) — confirm before porting.
        test_name = test_name.encode('unicode-escape')
        if expected:
            expected_text = u" [expected %s]" % expected
        else:
            expected_text = u""
        lines = [u"%s%s %s" % (status, expected_text, test_name)]
        if message:
            lines.append(u" \u2192 %s" % message)
        if stack:
            lines.append("")
            lines += [stackline for stackline in stack.splitlines()]
        return lines
    def get_output_for_unexpected_subtests(self, test_name, unexpected_subtests):
        if not unexpected_subtests:
            return ""
        def add_subtest_failure(lines, subtest, stack=None):
            lines += self.get_lines_for_unexpected_result(
                subtest.get('subtest', None),
                subtest.get('status', None),
                subtest.get('expected', None),
                subtest.get('message', None),
                stack)
        def make_subtests_failure(test_name, subtests, stack=None):
            lines = [u"Unexpected subtest result in %s:" % test_name]
            for subtest in subtests[:-1]:
                add_subtest_failure(lines, subtest, None)
            add_subtest_failure(lines, subtests[-1], stack)
            return self.wrap_and_indent_lines(lines, "  ") + "\n"
        # Organize the failures by stack trace so we don't print the same stack trace
        # more than once. They are really tall and we don't want to flood the screen
        # with duplicate information.
        output = ""
        failures_by_stack = collections.defaultdict(list)
        for failure in unexpected_subtests:
            # Print stackless results first. They are all separate.
            if 'stack' not in failure:
                output += make_subtests_failure(test_name, [failure], None)
            else:
                failures_by_stack[failure['stack']].append(failure)
        # NOTE(review): iteritems is Python 2 only.
        for (stack, failures) in failures_by_stack.iteritems():
            output += make_subtests_failure(test_name, failures, stack)
        return output
    def test_end(self, data):
        self.completed_tests += 1
        test_status = data["status"]
        test_name = data["test"]
        # mozlog only includes "expected" when the result was unexpected.
        had_unexpected_test_result = "expected" in data
        subtest_failures = self.subtest_failures.pop(test_name, [])
        del self.running_tests[data['thread']]
        new_display = self.build_status_line()
        if not had_unexpected_test_result and not subtest_failures:
            self.expected[test_status] += 1
            if self.interactive:
                return self.generate_output(text=None, new_display=new_display)
            else:
                return self.generate_output(text="  %s\n" % test_name,
                                            new_display=new_display)
        # If the test crashed or timed out, we also include any process output,
        # because there is a good chance that the test produced a stack trace
        # or other error messages.
        if test_status in ("CRASH", "TIMEOUT"):
            stack = self.test_output[test_name] + data.get('stack', "")
        else:
            stack = data.get('stack', None)
        output = ""
        if had_unexpected_test_result:
            self.unexpected_tests[test_status].append(data)
            lines = self.get_lines_for_unexpected_result(
                test_name,
                test_status,
                data.get('expected', None),
                data.get('message', None),
                stack)
            output += self.wrap_and_indent_lines(lines, "  ") + "\n"
        if subtest_failures:
            self.tests_with_failing_subtests.append(test_name)
            output += self.get_output_for_unexpected_subtests(test_name,
                                                              subtest_failures)
        # Accumulated for re-printing in suite_end (non-interactive mode).
        self.test_failure_text += output
        return self.generate_output(text=output, new_display=new_display)
    def test_status(self, data):
        # Buffer unexpected subtest results until the parent test finishes.
        if "expected" in data:
            self.subtest_failures[data["test"]].append(data)
    def suite_end(self, data):
        self.end_time = data["time"]
        if not self.interactive:
            output = u"\n"
        else:
            output = ""
        output += u"Ran %i tests finished in %.1f seconds.\n" % (
            self.completed_tests, (self.end_time - self.start_time) / 1000)
        output += u"  \u2022 %i ran as expected. %i tests skipped.\n" % (
            sum(self.expected.values()), self.expected['SKIP'])
        def text_for_unexpected_list(text, section):
            tests = self.unexpected_tests[section]
            if not tests:
                return u""
            return u"  \u2022 %i tests %s\n" % (len(tests), text)
        output += text_for_unexpected_list(u"crashed unexpectedly", 'CRASH')
        output += text_for_unexpected_list(u"had errors unexpectedly", 'ERROR')
        output += text_for_unexpected_list(u"failed unexpectedly", 'FAIL')
        output += text_for_unexpected_list(u"timed out unexpectedly", 'TIMEOUT')
        output += text_for_unexpected_list(u"passed unexpectedly", 'PASS')
        output += text_for_unexpected_list(u"unexpectedly okay", 'OK')
        num_with_failing_subtests = len(self.tests_with_failing_subtests)
        if num_with_failing_subtests:
            output += (u"  \u2022 %i tests had unexpected subtest results\n"
                       % num_with_failing_subtests)
        output += "\n"
        # Repeat failing test output, so that it is easier to find, since the
        # non-interactive version prints all the test names.
        if not self.interactive and self.test_failure_text:
            output += u"Tests with unexpected results:\n" + self.test_failure_text
        return self.generate_output(text=output, new_display="")
    def process_output(self, data):
        # Capture stdout/stderr from the harness, attributed to the test
        # currently running on that thread.
        if data['thread'] not in self.running_tests:
            return
        test_name = self.running_tests[data['thread']]
        self.test_output[test_name] += data['data'] + "\n"
    def log(self, data):
        # We are logging messages that begin with STDERR, because that is how exceptions
        # in this formatter are indicated.
        if data['message'].startswith('STDERR'):
            return self.generate_output(text=data['message'] + "\n")
        if data['level'] in ('CRITICAL', 'ERROR'):
            return self.generate_output(text=data['message'] + "\n")
| mpl-2.0 |
shengdie/Dorimanx-LG-G2-D802-Kernel | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
# Module-level state shared by the tracepoint handlers below.
# (The original file assigned process_names twice; the first assignment
# was dead and has been removed.)
process_names = {} # long-lived pid-to-execname mapping
thread_thislock = {} # tid -> futex uaddr the thread last blocked on
thread_blocktime = {} # tid -> timestamp (ns) when the thread entered FUTEX_WAIT
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
	nr, uaddr, op, val, utime, uaddr2, val3):
	# Record when a thread starts blocking on a futex (FUTEX_WAIT only).
	# FUTEX_CMD_MASK / FUTEX_WAIT come from perf's Util helper module.
	cmd = op & FUTEX_CMD_MASK
	if cmd != FUTEX_WAIT:
		return # we don't care about originators of WAKE events
	process_names[tid] = comm
	thread_thislock[tid] = uaddr
	thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
	nr, ret):
	# On return from the futex syscall, accumulate how long the thread was
	# blocked — but only if we saw it enter FUTEX_WAIT earlier.
	# ("x in d" replaces dict.has_key, which was removed in Python 3 and
	# works identically on Python 2.)
	if tid in thread_blocktime:
		elapsed = nsecs(s, ns) - thread_blocktime[tid]
		add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
		del thread_blocktime[tid]
		del thread_thislock[tid]
def trace_begin():
	# Single-argument print() parses identically on Python 2 and 3,
	# unlike the bare print statement the script originally used.
	print("Press control+C to stop and show the summary")
def trace_end():
	"""Print a per-(process, lock) summary of futex contention."""
	for (tid, lock) in lock_waits:
		# add_stats() stores (min, max, avg, count); renamed locals so
		# the builtins min/max are no longer shadowed.
		min_ns, max_ns, avg_ns, count = lock_waits[tid, lock]
		print("%s[%d] lock %x contended %d times, %d avg ns" %
		      (process_names[tid], tid, lock, count, avg_ns))
| gpl-2.0 |
themutt/plastex | build/lib.linux-x86_64-2.7/plasTeX/Packages/changebar.py | 9 | 1056 | #!/usr/bin/env python
from plasTeX import Command, Environment, DimenCommand, Counter
def ProcessOptions(options, document):
    """plasTeX package-load hook for the ``changebar`` package.

    No package options are interpreted; the hook only needs to exist so
    plasTeX accepts ``\\usepackage{changebar}``. (The original body bound
    ``document.context`` to an unused local, which has been removed.)
    """
class cbstart(Command):
    """\\cbstart: rewritten into a begin-mode ``changebar`` node."""
    def invoke(self, tex):
        cb = self.ownerDocument.createElement('changebar')
        cb.macroMode = self.MODE_BEGIN
        cb.invoke(tex)
        return [cb]
class cbend(Command):
    """\\cbend: rewritten into an end-mode ``changebar`` node."""
    def invoke(self, tex):
        cb = self.ownerDocument.createElement('changebar')
        cb.macroMode = self.MODE_END
        cb.invoke(tex)
        return [cb]
class changebar(Environment):
    """The changebar environment emitted for \\cbstart...\\cbend pairs."""
    args = '[ width:str ]'
    blockType = True
    forcePars = True
class cbdelete(Command):
    """\\cbdelete[width]: mark a deletion bar at this point."""
    args = '[ width:str ]'
class nochangebars(Command):
    # \nochangebars takes no arguments and produces no content here.
    pass
class cbcolor(Command):
    """\\cbcolor[model]{color}: set the color used for change bars."""
    args = '[ model:str ] color:str'
class changebarwidth(DimenCommand):
    # \changebarwidth is a length register (DimenCommand).
    pass
class deletebarwidth(DimenCommand):
    # \deletebarwidth is a length register (DimenCommand).
    pass
class changebarsep(DimenCommand):
    # \changebarsep is a length register (DimenCommand).
    pass
class changebargrey(Counter):
    # changebargrey is declared as a LaTeX counter.
    pass
class outerbarstrue(Command):
    # \outerbarstrue toggles bar placement; no content produced here.
    pass
class driver(Command):
    """\\driver{name}: select the output driver (parsed, otherwise ignored)."""
    args = 'name:str'
| mit |
sgraham/nope | tools/chrome_proxy/integration_tests/network_metrics.py | 9 | 6588 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import gzip
import hashlib
import io
import logging
import zlib
from metrics import Metric
from telemetry.page import page_test
# All network metrics are Chrome only for now.
from telemetry.core.backends.chrome_inspector import inspector_network
from telemetry.value import scalar
class NetworkMetricException(page_test.MeasurementFailure):
  # Raised when timeline network data cannot be interpreted (e.g. an
  # unknown Content-Encoding).
  pass
class HTTPResponse(object):
  """ Represents an HTTP response from a timeline event."""
  def __init__(self, event):
    self._response = (
        inspector_network.InspectorNetworkResponseData.FromTimelineEvent(event))
    # Remote port is optional in the timeline event payload.
    self._remote_port = None
    if 'response' in event.args and 'remotePort' in event.args['response']:
      self._remote_port = event.args['response']['remotePort']
    # Lazily computed by the content_length property.
    self._content_length = None
  @property
  def response(self):
    return self._response
  @property
  def remote_port(self):
    return self._remote_port
  @property
  def url_signature(self):
    # Short stable identifier for the URL, used in metric names.
    return hashlib.md5(self.response.url).hexdigest()
  @property
  def content_length(self):
    if self._content_length is None:
      self._content_length = self.GetContentLength()
    return self._content_length
  @property
  def has_original_content_length(self):
    # Header injected by the data-reduction proxy with the pre-compression size.
    return 'X-Original-Content-Length' in self.response.headers
  @property
  def original_content_length(self):
    if self.has_original_content_length:
      return int(self.response.GetHeader('X-Original-Content-Length'))
    return 0
  @property
  def data_saving_rate(self):
    # Fraction of bytes saved by the proxy; 0.0 for cache hits or when the
    # original size is unknown/invalid.
    if (self.response.served_from_cache or
        not self.has_original_content_length or
        self.original_content_length <= 0):
      return 0.0
    return (float(self.original_content_length - self.content_length) /
            self.original_content_length)
  def GetContentLengthFromBody(self):
    resp = self.response
    body, base64_encoded = resp.GetBody()
    if not body:
      return 0
    # The binary data like images, etc is base64_encoded. Decode it to get
    # the actual content length.
    if base64_encoded:
      decoded = base64.b64decode(body)
      return len(decoded)
    encoding = resp.GetHeader('Content-Encoding')
    if not encoding:
      return len(body)
    # The response body returned from a timeline event is always decompressed.
    # So, we need to compress it to get the actual content length if headers
    # say so.
    encoding = encoding.lower()
    if encoding == 'gzip':
      # NOTE: method name below carries a historical typo ("Gizpped"); kept
      # for interface compatibility.
      return self.GetGizppedBodyLength(body)
    elif encoding == 'deflate':
      return len(zlib.compress(body, 9))
    else:
      raise NetworkMetricException, (
          'Unknown Content-Encoding %s for %s' % (encoding, resp.url))
  def GetContentLength(self):
    # Prefer re-deriving the length from the body; fall back to the
    # Content-Length header, then to the raw body length.
    cl = 0
    try:
      cl = self.GetContentLengthFromBody()
    except Exception, e:
      logging.warning('Fail to get content length for %s from body: %s',
                      self.response.url[:100], e)
    if cl == 0:
      resp = self.response
      cl_header = resp.GetHeader('Content-Length')
      if cl_header:
        cl = int(cl_header)
      else:
        body, _ = resp.GetBody()
        if body:
          cl = len(body)
    return cl
  @staticmethod
  def GetGizppedBodyLength(body):
    # Re-compress the (already decompressed) body to estimate on-the-wire size.
    if not body:
      return 0
    bio = io.BytesIO()
    try:
      with gzip.GzipFile(fileobj=bio, mode="wb", compresslevel=9) as f:
        f.write(body.encode('utf-8'))
    except Exception, e:
      logging.warning('Fail to gzip response body: %s', e)
      raise e
    return len(bio.getvalue())
class NetworkMetric(Metric):
  """A network metric based on timeline events."""
  def __init__(self):
    super(NetworkMetric, self).__init__()
    # Whether to add detailed result for each sub-resource in a page.
    self.add_result_for_resource = False
    # Whether to emit the aggregate data_saving percentage.
    self.compute_data_saving = False
    # Cached HTTPResponse timeline events, populated on first IterResponses.
    self._events = None
  def Start(self, page, tab):
    self._events = None
    tab.StartTimelineRecording()
  def Stop(self, page, tab):
    assert self._events is None
    tab.StopTimelineRecording()
  def IterResponses(self, tab):
    # Lazily fetch and cache the HTTPResponse events from the timeline.
    if self._events is None:
      self._events = tab.timeline_model.GetAllEventsOfName('HTTPResponse')
    if len(self._events) == 0:
      return
    for e in self._events:
      yield self.ResponseFromEvent(e)
  def ResponseFromEvent(self, event):
    # Hook point: subclasses may wrap events in a different response type.
    return HTTPResponse(event)
  def AddResults(self, tab, results):
    # Sum per-resource compressed/original sizes across the page load and
    # report them (plus, optionally, per-resource values and data saving).
    content_length = 0
    original_content_length = 0
    for resp in self.IterResponses(tab):
      # Ignore content length calculation for cache hit.
      if resp.response.served_from_cache:
        continue
      resource = resp.response.url
      resource_signature = resp.url_signature
      cl = resp.content_length
      if resp.has_original_content_length:
        ocl = resp.original_content_length
        if ocl < cl:
          logging.warning('original content length (%d) is less than content '
                          'length (%d) for resource %s', ocl, cl, resource)
        if self.add_result_for_resource:
          results.AddValue(scalar.ScalarValue(
              results.current_page,
              'resource_data_saving_' + resource_signature, 'percent',
              resp.data_saving_rate * 100))
          results.AddValue(scalar.ScalarValue(
              results.current_page,
              'resource_original_content_length_' + resource_signature, 'bytes',
              ocl))
        original_content_length += ocl
      else:
        # Without the proxy header, the original size equals the actual size.
        original_content_length += cl
      if self.add_result_for_resource:
        results.AddValue(scalar.ScalarValue(
            results.current_page,
            'resource_content_length_' + resource_signature, 'bytes', cl))
      content_length += cl
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'content_length', 'bytes', content_length))
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'original_content_length', 'bytes',
        original_content_length))
    if self.compute_data_saving:
      if (original_content_length > 0 and
          original_content_length >= content_length):
        saving = (float(original_content_length-content_length) * 100 /
                  original_content_length)
        results.AddValue(scalar.ScalarValue(
            results.current_page, 'data_saving', 'percent', saving))
      else:
        results.AddValue(scalar.ScalarValue(
            results.current_page, 'data_saving', 'percent', 0.0))
| bsd-3-clause |
thresholdsoftware/asylum-v2.0 | openerp/addons/mrp/stock.py | 18 | 9255 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp.osv import osv
from openerp import netsvc
class StockMove(osv.osv):
_inherit = 'stock.move'
_columns = {
'production_id': fields.many2one('mrp.production', 'Production', select=True),
}
    def create_chained_picking(self, cr, uid, moves, context=None):
        # After the core logic creates the chained moves, clear the
        # production link on the copies: chained moves belong to the new
        # picking, not to the originating manufacturing order.
        new_moves = super(StockMove, self).create_chained_picking(cr, uid, moves, context=context)
        self.write(cr, uid, [x.id for x in new_moves], {'production_id': False}, context=context)
        return new_moves
    def _action_explode(self, cr, uid, move, context=None):
        """ Explodes pickings.

        For a product supplied by manufacturing with a phantom BoM, replace
        the move by one move per BoM component plus a procurement for each,
        and turn the original move into a confirmed dummy move for the kit.

        @param move: Stock moves
        @return: list of processed move ids (original move plus components)
        """
        bom_obj = self.pool.get('mrp.bom')
        move_obj = self.pool.get('stock.move')
        procurement_obj = self.pool.get('procurement.order')
        product_obj = self.pool.get('product.product')
        wf_service = netsvc.LocalService("workflow")
        processed_ids = [move.id]
        if move.product_id.supply_method == 'produce':
            # Only top-level phantom (kit) BoMs are exploded.
            bis = bom_obj.search(cr, uid, [
                ('product_id','=',move.product_id.id),
                ('bom_id','=',False),
                ('type','=','phantom')])
            if bis:
                factor = move.product_qty
                bom_point = bom_obj.browse(cr, uid, bis[0], context=context)
                res = bom_obj._bom_explode(cr, uid, bom_point, factor, [])
                for line in res[0]:
                    # One copy of the original move per BoM component line.
                    valdef = {
                        'picking_id': move.picking_id.id,
                        'product_id': line['product_id'],
                        'product_uom': line['product_uom'],
                        'product_qty': line['product_qty'],
                        'product_uos': line['product_uos'],
                        'product_uos_qty': line['product_uos_qty'],
                        'move_dest_id': move.id,
                        'state': 'draft', #will be confirmed below
                        'name': line['name'],
                        'move_history_ids': [(6,0,[move.id])],
                        'move_history_ids2': [(6,0,[])],
                        'procurements': [],
                    }
                    mid = move_obj.copy(cr, uid, move.id, default=valdef)
                    processed_ids.append(mid)
                    # Each component also gets its own procurement order.
                    prodobj = product_obj.browse(cr, uid, line['product_id'], context=context)
                    proc_id = procurement_obj.create(cr, uid, {
                        'name': (move.picking_id.origin or ''),
                        'origin': (move.picking_id.origin or ''),
                        'date_planned': move.date,
                        'product_id': line['product_id'],
                        'product_qty': line['product_qty'],
                        'product_uom': line['product_uom'],
                        'product_uos_qty': line['product_uos'] and line['product_uos_qty'] or False,
                        'product_uos':  line['product_uos'],
                        'location_id': move.location_id.id,
                        'procure_method': prodobj.procure_method,
                        'move_id': mid,
                    })
                    wf_service.trg_validate(uid, 'procurement.order', proc_id, 'button_confirm', cr)
                # The kit move itself becomes a confirmed no-op (source ==
                # destination) detached from the picking.
                move_obj.write(cr, uid, [move.id], {
                    'location_dest_id': move.location_id.id, # dummy move for the kit
                    'auto_validate': True,
                    'picking_id': False,
                    'state': 'confirmed'
                })
                for m in procurement_obj.search(cr, uid, [('move_id','=',move.id)], context):
                    wf_service.trg_validate(uid, 'procurement.order', m, 'button_confirm', cr)
                    wf_service.trg_validate(uid, 'procurement.order', m, 'button_wait_done', cr)
        if processed_ids and move.state == 'assigned':
            # Set the state of resulting moves according to 'assigned' as the original move is assigned
            move_obj.write(cr, uid, list(set(processed_ids) - set([move.id])), {'state': 'assigned'}, context=context)
        return processed_ids
def action_consume(self, cr, uid, ids, product_qty, location_id=False, context=None):
    """ Consume a product with a specific quantity from a specific source location.

    Extends the base stock implementation so that any manufacturing order
    whose raw-material lines reference the consumed move is pushed along
    its workflow (forced to production when still 'confirmed').
    @param product_qty: Consumed product quantity
    @param location_id: Source location
    @return: Consumed lines (ids of the stock moves created by the split)
    """
    res = []
    production_obj = self.pool.get('mrp.production')
    wf_service = netsvc.LocalService("workflow")
    for move in self.browse(cr, uid, ids):
        # Make sure the move is confirmed before it is consumed.
        move.action_confirm(context)
        new_moves = super(StockMove, self).action_consume(cr, uid, [move.id], product_qty, location_id, context=context)
        # Find the manufacturing orders that use this move as a raw material.
        production_ids = production_obj.search(cr, uid, [('move_lines', 'in', [move.id])])
        for prod in production_obj.browse(cr, uid, production_ids, context=context):
            if prod.state == 'confirmed':
                # Reserve/force availability before triggering production.
                production_obj.force_production(cr, uid, [prod.id])
            wf_service.trg_validate(uid, 'mrp.production', prod.id, 'button_produce', cr)
        for new_move in new_moves:
            if new_move == move.id:
                # This move is already there in move lines of production order
                continue
            # Link the newly created (split) move to the production orders too.
            production_obj.write(cr, uid, production_ids, {'move_lines': [(4, new_move)]})
            res.append(new_move)
    return res
def action_scrap(self, cr, uid, ids, product_qty, location_id, context=None):
    """ Move the scrapped/damaged product into the scrap location.
    @param product_qty: Scrapped product quantity
    @param location_id: Scrap location
    @return: Scrapped lines (ids of the stock moves created by the scrap)
    """
    res = []
    production_obj = self.pool.get('mrp.production')
    wf_service = netsvc.LocalService("workflow")
    for move in self.browse(cr, uid, ids, context=context):
        new_moves = super(StockMove, self).action_scrap(cr, uid, [move.id], product_qty, location_id, context=context)
        # If we are not scrapping our whole move, tracking and lot references must not be removed
        # self.write(cr, uid, [move.id], {'prodlot_id': False, 'tracking_id': False})
        # Nudge any manufacturing order that references this move along its workflow.
        production_ids = production_obj.search(cr, uid, [('move_lines', 'in', [move.id])])
        for prod_id in production_ids:
            wf_service.trg_validate(uid, 'mrp.production', prod_id, 'button_produce', cr)
        for new_move in new_moves:
            # Attach the scrap move to the linked production orders as well.
            production_obj.write(cr, uid, production_ids, {'move_lines': [(4, new_move)]})
            res.append(new_move)
    return res
# Instantiating the class registers the extended model with the OpenERP
# ORM pool (required idiom in OpenERP 6.x).
StockMove()
class StockPicking(osv.osv):
    """stock.picking extension that expands phantom-BoM (kit) moves."""
    _inherit = 'stock.picking'

    def action_explode(self, cr, uid, move_ids, *args):
        """Explodes moves by expanding kit components.

        Returns the ids of all moves (original and exploded) that are not
        yet confirmed, assigned or done.
        """
        move_pool = self.pool.get('stock.move')
        # Start from whatever the base implementation considers pending.
        pending = set(super(StockPicking, self).action_explode(cr, uid, move_ids, *args))
        settled_states = ('confirmed', 'assigned', 'done')
        for picking_move in move_pool.browse(cr, uid, move_ids):
            exploded_ids = move_pool._action_explode(cr, uid, picking_move)
            for exploded in move_pool.browse(cr, uid, exploded_ids):
                if exploded.state not in settled_states:
                    pending.add(exploded.id)
        return list(pending)

StockPicking()
class split_in_production_lot(osv.osv_memory):
    """stock.move.split extension keeping production orders in sync."""
    _inherit = "stock.move.split"

    def split(self, cr, uid, ids, move_ids, context=None):
        """Split move lines into the requested quantities.

        After the base split, every manufacturing order that referenced one
        of the original moves is linked to the newly created moves too.
        @param move_ids: Stock moves.
        @return: List of new moves.
        """
        created_moves = super(split_in_production_lot, self).split(cr, uid, ids, move_ids, context=context)
        prod_pool = self.pool.get('mrp.production')
        linked_prod_ids = prod_pool.search(cr, uid, [('move_lines', 'in', move_ids)])
        # (4, id) is the ORM "link existing record" command.
        link_commands = []
        for created_id in created_moves:
            link_commands.append((4, created_id))
        prod_pool.write(cr, uid, linked_prod_ids, {'move_lines': link_commands})
        return created_moves

split_in_production_lot()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Azure/azure-sdk-for-python | sdk/keyvault/azure-keyvault-keys/samples/backup_restore_operations_async.py | 1 | 3785 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import asyncio
import os
from azure.keyvault.keys.aio import KeyClient
from azure.identity.aio import DefaultAzureCredential
from azure.core.exceptions import HttpResponseError
# ----------------------------------------------------------------------------------------------------------
# Prerequisites:
# 1. An Azure Key Vault (https://docs.microsoft.com/en-us/azure/key-vault/quick-create-cli)
#
# 2. azure-keyvault-keys and azure-identity libraries (pip install these)
#
# 3. Set Environment variables AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET, VAULT_URL
# (See https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/keyvault/azure-keyvault-keys#authenticate-the-client)
#
# ----------------------------------------------------------------------------------------------------------
# Sample - demonstrates the basic backup and restore operations on a vault(key) resource for Azure Key Vault
#
# 1. Create a key (create_key)
#
# 2. Backup a key (backup_key)
#
# 3. Delete a key (delete_key)
#
# 4. Purge a key (purge_deleted_key)
#
# 5. Restore a key (restore_key_backup)
# ----------------------------------------------------------------------------------------------------------
async def run_sample():
    """Walk through the key backup/restore lifecycle against a Key Vault:
    create, back up, delete, purge, then restore from the backup."""
    # Build a key client authenticated through environment-based service
    # principal credentials (AZURE_CLIENT_ID, AZURE_CLIENT_SECRET and
    # AZURE_TENANT_ID must be set for DefaultAzureCredential to resolve).
    vault_url = os.environ["VAULT_URL"]
    azure_credential = DefaultAzureCredential()
    key_client = KeyClient(vault_url=vault_url, credential=azure_credential)
    try:
        # Create an RSA key; if the name already exists in the vault a new
        # version of that key is created instead.
        print("\n.. Create Key")
        rsa_key = await key_client.create_key("keyName", "RSA")
        print("Key with name '{0}' created with key type '{1}'".format(rsa_key.name, rsa_key.key_type))

        # Snapshot the key so it can be recovered later.  For long-term
        # storage the backup bytes would normally be written to a file.
        print("\n.. Create a backup for an existing Key")
        backup_blob = await key_client.backup_key(rsa_key.name)
        print("Backup created for key with name '{0}'.".format(rsa_key.name))

        # The key is no longer in use, so delete it.
        removed_key = await key_client.delete_key(rsa_key.name)
        print("Deleted key with name '{0}'".format(removed_key.name))

        # Purge the deleted key.  Purging takes some time, so pause before
        # restoring the backup to avoid a name conflict.
        print("\n.. Purge the key")
        await key_client.purge_deleted_key(rsa_key.name)
        await asyncio.sleep(60)
        print("Purged key with name '{0}'".format(removed_key.name))

        # Bring the key back into the vault from the backup blob.
        print("\n.. Restore the key using the backed up key bytes")
        rsa_key = await key_client.restore_key_backup(backup_blob)
        print("Restored key with name '{0}'".format(rsa_key.name))
    except HttpResponseError as e:
        print("\nrun_sample has caught an error. {0}".format(e.message))
    finally:
        print("\nrun_sample done")
        await azure_credential.close()
        await key_client.close()
if __name__ == "__main__":
    try:
        # asyncio.run() creates an event loop, runs the coroutine to
        # completion and closes the loop -- equivalent to the manual
        # get_event_loop()/run_until_complete()/close() sequence, without
        # depending on the deprecated implicit event loop.
        asyncio.run(run_sample())
    except Exception as e:
        print("Top level Error: {0}".format(str(e)))
ondrokrc/gramps | gramps/gen/filters/rules/media/_hasidof.py | 2 | 1651 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .. import HasGrampsId
#-------------------------------------------------------------------------
#
# HasIdOf
#
#-------------------------------------------------------------------------
class HasIdOf(HasGrampsId):
    """Rule that checks for a media object with a specific GRAMPS ID"""

    # Translated label shown for this rule in the filter editor.
    name = _('Media object with <Id>')
    # Translated one-line summary of what the rule matches.
    description = _("Matches a media object with a specified Gramps ID")
| gpl-2.0 |
pniedzielski/fb-hackathon-2013-11-21 | src/repl.it/jsrepl/extern/python/closured/lib/python2.7/encodings/shift_jis.py | 816 | 1039 | #
# shift_jis.py: Python Unicode Codec for SHIFT_JIS
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
# Fetch the C-implemented 'shift_jis' codec from the internal _codecs_jp
# extension module; every class below delegates to this object.
codec = _codecs_jp.getcodec('shift_jis')
class Codec(codecs.Codec):
    # Stateless one-shot encode/decode delegate straight to the C codec.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    # Feed-style encoder; the multibyte base class reads this `codec`
    # class attribute to do the actual conversion.
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    # Feed-style decoder; the multibyte base class reads this `codec`
    # class attribute to do the actual conversion.
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    # Stream-oriented reader; conversion work happens in the C codec.
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    # Stream-oriented writer; conversion work happens in the C codec.
    codec = codec
def getregentry():
    """Return the CodecInfo entry the codecs registry uses for 'shift_jis'."""
    return codecs.CodecInfo(
        name='shift_jis',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| agpl-3.0 |
ryfeus/lambda-packs | Tensorflow_OpenCV_Nightly/source/tensorflow/python/tools/strip_unused_lib.py | 104 | 4859 | # pylint: disable=g-bad-file-header
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to remove unneeded nodes from a GraphDefs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from google.protobuf import text_format
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import graph_util
from tensorflow.python.platform import gfile
def strip_unused(input_graph_def, input_node_names, output_node_names,
                 placeholder_type_enum):
  """Prunes a GraphDef down to what the outputs need from the inputs.

  Each named input node is replaced by a Placeholder of the requested
  dtype (copying its `_output_shapes` attr when present), and every node
  not required to compute `output_node_names` is removed.

  Args:
    input_graph_def: A graph with nodes we want to prune.
    input_node_names: A list of the nodes we use as inputs.
    output_node_names: A list of the output nodes.
    placeholder_type_enum: The AttrValue enum for the placeholder data type,
      or a list that specifies one value per input node name.

  Returns:
    A `GraphDef` with all unnecessary ops removed.

  Raises:
    ValueError: If any element in `input_node_names` refers to a tensor
      instead of an operation.
    KeyError: If any element in `input_node_names` is not found in the graph.
  """
  for name in input_node_names:
    if ":" in name:
      raise ValueError("Name '%s' appears to refer to a Tensor, "
                       "not a Operation." % name)

  # Swap the requested input nodes for placeholders; any now-unreferenced
  # ancestors will be dropped by extract_sub_graph() below.
  unseen_inputs = set(input_node_names)
  rewritten_graph = graph_pb2.GraphDef()
  for node in input_graph_def.node:
    if node.name not in input_node_names:
      rewritten_graph.node.extend([copy.deepcopy(node)])
      continue
    # .remove() (not .discard()) so a duplicated node name still raises.
    unseen_inputs.remove(node.name)
    placeholder = node_def_pb2.NodeDef()
    placeholder.op = "Placeholder"
    placeholder.name = node.name
    if isinstance(placeholder_type_enum, list):
      # Per-input dtype: pick the enum matching this node's position.
      dtype = placeholder_type_enum[input_node_names.index(node.name)]
    else:
      dtype = placeholder_type_enum
    placeholder.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtype))
    if "_output_shapes" in node.attr:
      placeholder.attr["_output_shapes"].CopyFrom(node.attr["_output_shapes"])
    rewritten_graph.node.extend([placeholder])

  if unseen_inputs:
    raise KeyError("The following input nodes were not found: %s\n" %
                   unseen_inputs)

  return graph_util.extract_sub_graph(rewritten_graph, output_node_names)
def strip_unused_from_files(input_graph, input_binary, output_graph,
                            output_binary, input_node_names, output_node_names,
                            placeholder_type_enum):
  """Removes unused nodes from a graph file.

  Reads `input_graph` (binary or text GraphDef), strips every node not
  needed to compute `output_node_names` from `input_node_names` (see
  `strip_unused`), and writes the result to `output_graph`.

  Args:
    input_graph: Path of the GraphDef file to read.
    input_binary: True if the input file is a binary proto, False for text.
    output_graph: Path of the GraphDef file to write.
    output_binary: True to write a binary proto, False for text.
    input_node_names: Comma-separated string of input node names.
    output_node_names: Comma-separated string of output node names.
    placeholder_type_enum: The AttrValue enum for the placeholder data type,
      or a list with one value per input node name.

  Returns:
    -1 after printing a message when arguments/files are invalid;
    otherwise None.
  """
  if not gfile.Exists(input_graph):
    print("Input graph file '" + input_graph + "' does not exist!")
    return -1

  if not output_node_names:
    print("You need to supply the name of a node to --output_node_names.")
    return -1

  input_graph_def = graph_pb2.GraphDef()
  # Binary protos must be read in binary mode, text protos in text mode.
  mode = "rb" if input_binary else "r"
  with gfile.FastGFile(input_graph, mode) as f:
    if input_binary:
      input_graph_def.ParseFromString(f.read())
    else:
      text_format.Merge(f.read(), input_graph_def)

  output_graph_def = strip_unused(input_graph_def,
                                  input_node_names.split(","),
                                  output_node_names.split(","),
                                  placeholder_type_enum)

  if output_binary:
    with gfile.GFile(output_graph, "wb") as f:
      f.write(output_graph_def.SerializeToString())
  else:
    with gfile.GFile(output_graph, "w") as f:
      f.write(text_format.MessageToString(output_graph_def))
  print("%d ops in the final graph." % len(output_graph_def.node))
| mit |
ryfeus/lambda-packs | Keras_tensorflow/source/google/protobuf/internal/test_util.py | 38 | 29253 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utilities for Python proto2 tests.
This is intentionally modeled on C++ code in
//google/protobuf/test_util.*.
"""
__author__ = 'robinson@google.com (Will Robinson)'
import os.path
import sys
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_pb2
from google.protobuf import descriptor_pb2
# Tests whether the given TestAllTypes message is proto2 or not.
# This is used to gate several fields/features that only exist
# for the proto2 version of the message.
def IsProto2(message):
  """Returns True if `message` was generated from proto2 syntax."""
  return message.DESCRIPTOR.syntax == "proto2"
def SetAllNonLazyFields(message):
  """Sets every non-lazy field in the message to a unique value.

  Args:
    message: A TestAllTypes instance.
  """

  #
  # Optional fields.
  #

  message.optional_int32 = 101
  message.optional_int64 = 102
  message.optional_uint32 = 103
  message.optional_uint64 = 104
  message.optional_sint32 = 105
  message.optional_sint64 = 106
  message.optional_fixed32 = 107
  message.optional_fixed64 = 108
  message.optional_sfixed32 = 109
  message.optional_sfixed64 = 110
  message.optional_float = 111
  message.optional_double = 112
  message.optional_bool = True
  message.optional_string = u'115'
  message.optional_bytes = b'116'

  # Groups exist only in proto2.
  if IsProto2(message):
    message.optionalgroup.a = 117
  message.optional_nested_message.bb = 118
  message.optional_foreign_message.c = 119
  message.optional_import_message.d = 120
  message.optional_public_import_message.e = 126
  message.optional_nested_enum = unittest_pb2.TestAllTypes.BAZ
  message.optional_foreign_enum = unittest_pb2.FOREIGN_BAZ
  # The import enum field exists only in the proto2 variant of the message.
  if IsProto2(message):
    message.optional_import_enum = unittest_import_pb2.IMPORT_BAZ
  message.optional_string_piece = u'124'
  message.optional_cord = u'125'

  #
  # Repeated fields.
  #

  message.repeated_int32.append(201)
  message.repeated_int64.append(202)
  message.repeated_uint32.append(203)
  message.repeated_uint64.append(204)
  message.repeated_sint32.append(205)
  message.repeated_sint64.append(206)
  message.repeated_fixed32.append(207)
  message.repeated_fixed64.append(208)
  message.repeated_sfixed32.append(209)
  message.repeated_sfixed64.append(210)
  message.repeated_float.append(211)
  message.repeated_double.append(212)
  message.repeated_bool.append(True)
  message.repeated_string.append(u'215')
  message.repeated_bytes.append(b'216')
  if IsProto2(message):
    message.repeatedgroup.add().a = 217
  message.repeated_nested_message.add().bb = 218
  message.repeated_foreign_message.add().c = 219
  message.repeated_import_message.add().d = 220
  message.repeated_lazy_message.add().bb = 227
  message.repeated_nested_enum.append(unittest_pb2.TestAllTypes.BAR)
  message.repeated_foreign_enum.append(unittest_pb2.FOREIGN_BAR)
  if IsProto2(message):
    message.repeated_import_enum.append(unittest_import_pb2.IMPORT_BAR)
  message.repeated_string_piece.append(u'224')
  message.repeated_cord.append(u'225')

  # Add a second one of each field.
  message.repeated_int32.append(301)
  message.repeated_int64.append(302)
  message.repeated_uint32.append(303)
  message.repeated_uint64.append(304)
  message.repeated_sint32.append(305)
  message.repeated_sint64.append(306)
  message.repeated_fixed32.append(307)
  message.repeated_fixed64.append(308)
  message.repeated_sfixed32.append(309)
  message.repeated_sfixed64.append(310)
  message.repeated_float.append(311)
  message.repeated_double.append(312)
  message.repeated_bool.append(False)
  message.repeated_string.append(u'315')
  message.repeated_bytes.append(b'316')
  if IsProto2(message):
    message.repeatedgroup.add().a = 317
  message.repeated_nested_message.add().bb = 318
  message.repeated_foreign_message.add().c = 319
  message.repeated_import_message.add().d = 320
  message.repeated_lazy_message.add().bb = 327
  message.repeated_nested_enum.append(unittest_pb2.TestAllTypes.BAZ)
  message.repeated_foreign_enum.append(unittest_pb2.FOREIGN_BAZ)
  if IsProto2(message):
    message.repeated_import_enum.append(unittest_import_pb2.IMPORT_BAZ)
  message.repeated_string_piece.append(u'324')
  message.repeated_cord.append(u'325')

  #
  # Fields that have defaults.
  #

  # Fields with explicit defaults exist only in proto2.
  if IsProto2(message):
    message.default_int32 = 401
    message.default_int64 = 402
    message.default_uint32 = 403
    message.default_uint64 = 404
    message.default_sint32 = 405
    message.default_sint64 = 406
    message.default_fixed32 = 407
    message.default_fixed64 = 408
    message.default_sfixed32 = 409
    message.default_sfixed64 = 410
    message.default_float = 411
    message.default_double = 412
    message.default_bool = False
    message.default_string = '415'
    message.default_bytes = b'416'
    message.default_nested_enum = unittest_pb2.TestAllTypes.FOO
    message.default_foreign_enum = unittest_pb2.FOREIGN_FOO
    message.default_import_enum = unittest_import_pb2.IMPORT_FOO
    message.default_string_piece = '424'
    message.default_cord = '425'

  message.oneof_uint32 = 601
  message.oneof_nested_message.bb = 602
  message.oneof_string = '603'
  message.oneof_bytes = b'604'
def SetAllFields(message):
  """Sets every field, including the lazy ones, to a unique value.

  Args:
    message: A TestAllTypes instance.
  """
  SetAllNonLazyFields(message)
  message.optional_lazy_message.bb = 127
def SetAllExtensions(message):
  """Sets every extension in the message to a unique value.

  Mirrors SetAllNonLazyFields/SetAllFields, but through the Extensions
  mapping of a TestAllExtensions message.

  Args:
    message: A unittest_pb2.TestAllExtensions instance.
  """

  extensions = message.Extensions
  pb2 = unittest_pb2
  import_pb2 = unittest_import_pb2

  #
  # Optional fields.
  #

  extensions[pb2.optional_int32_extension] = 101
  extensions[pb2.optional_int64_extension] = 102
  extensions[pb2.optional_uint32_extension] = 103
  extensions[pb2.optional_uint64_extension] = 104
  extensions[pb2.optional_sint32_extension] = 105
  extensions[pb2.optional_sint64_extension] = 106
  extensions[pb2.optional_fixed32_extension] = 107
  extensions[pb2.optional_fixed64_extension] = 108
  extensions[pb2.optional_sfixed32_extension] = 109
  extensions[pb2.optional_sfixed64_extension] = 110
  extensions[pb2.optional_float_extension] = 111
  extensions[pb2.optional_double_extension] = 112
  extensions[pb2.optional_bool_extension] = True
  extensions[pb2.optional_string_extension] = u'115'
  extensions[pb2.optional_bytes_extension] = b'116'
  extensions[pb2.optionalgroup_extension].a = 117
  extensions[pb2.optional_nested_message_extension].bb = 118
  extensions[pb2.optional_foreign_message_extension].c = 119
  extensions[pb2.optional_import_message_extension].d = 120
  extensions[pb2.optional_public_import_message_extension].e = 126
  extensions[pb2.optional_lazy_message_extension].bb = 127
  # Note: the original code assigned optional_nested_enum_extension twice
  # in a row with the same value; the redundant duplicate was removed.
  extensions[pb2.optional_nested_enum_extension] = pb2.TestAllTypes.BAZ
  extensions[pb2.optional_foreign_enum_extension] = pb2.FOREIGN_BAZ
  extensions[pb2.optional_import_enum_extension] = import_pb2.IMPORT_BAZ
  extensions[pb2.optional_string_piece_extension] = u'124'
  extensions[pb2.optional_cord_extension] = u'125'

  #
  # Repeated fields.
  #

  extensions[pb2.repeated_int32_extension].append(201)
  extensions[pb2.repeated_int64_extension].append(202)
  extensions[pb2.repeated_uint32_extension].append(203)
  extensions[pb2.repeated_uint64_extension].append(204)
  extensions[pb2.repeated_sint32_extension].append(205)
  extensions[pb2.repeated_sint64_extension].append(206)
  extensions[pb2.repeated_fixed32_extension].append(207)
  extensions[pb2.repeated_fixed64_extension].append(208)
  extensions[pb2.repeated_sfixed32_extension].append(209)
  extensions[pb2.repeated_sfixed64_extension].append(210)
  extensions[pb2.repeated_float_extension].append(211)
  extensions[pb2.repeated_double_extension].append(212)
  extensions[pb2.repeated_bool_extension].append(True)
  extensions[pb2.repeated_string_extension].append(u'215')
  extensions[pb2.repeated_bytes_extension].append(b'216')
  extensions[pb2.repeatedgroup_extension].add().a = 217
  extensions[pb2.repeated_nested_message_extension].add().bb = 218
  extensions[pb2.repeated_foreign_message_extension].add().c = 219
  extensions[pb2.repeated_import_message_extension].add().d = 220
  extensions[pb2.repeated_lazy_message_extension].add().bb = 227
  extensions[pb2.repeated_nested_enum_extension].append(pb2.TestAllTypes.BAR)
  extensions[pb2.repeated_foreign_enum_extension].append(pb2.FOREIGN_BAR)
  extensions[pb2.repeated_import_enum_extension].append(import_pb2.IMPORT_BAR)
  extensions[pb2.repeated_string_piece_extension].append(u'224')
  extensions[pb2.repeated_cord_extension].append(u'225')

  # Append a second one of each field.
  extensions[pb2.repeated_int32_extension].append(301)
  extensions[pb2.repeated_int64_extension].append(302)
  extensions[pb2.repeated_uint32_extension].append(303)
  extensions[pb2.repeated_uint64_extension].append(304)
  extensions[pb2.repeated_sint32_extension].append(305)
  extensions[pb2.repeated_sint64_extension].append(306)
  extensions[pb2.repeated_fixed32_extension].append(307)
  extensions[pb2.repeated_fixed64_extension].append(308)
  extensions[pb2.repeated_sfixed32_extension].append(309)
  extensions[pb2.repeated_sfixed64_extension].append(310)
  extensions[pb2.repeated_float_extension].append(311)
  extensions[pb2.repeated_double_extension].append(312)
  extensions[pb2.repeated_bool_extension].append(False)
  extensions[pb2.repeated_string_extension].append(u'315')
  extensions[pb2.repeated_bytes_extension].append(b'316')
  extensions[pb2.repeatedgroup_extension].add().a = 317
  extensions[pb2.repeated_nested_message_extension].add().bb = 318
  extensions[pb2.repeated_foreign_message_extension].add().c = 319
  extensions[pb2.repeated_import_message_extension].add().d = 320
  extensions[pb2.repeated_lazy_message_extension].add().bb = 327
  extensions[pb2.repeated_nested_enum_extension].append(pb2.TestAllTypes.BAZ)
  extensions[pb2.repeated_foreign_enum_extension].append(pb2.FOREIGN_BAZ)
  extensions[pb2.repeated_import_enum_extension].append(import_pb2.IMPORT_BAZ)
  extensions[pb2.repeated_string_piece_extension].append(u'324')
  extensions[pb2.repeated_cord_extension].append(u'325')

  #
  # Fields with defaults.
  #

  extensions[pb2.default_int32_extension] = 401
  extensions[pb2.default_int64_extension] = 402
  extensions[pb2.default_uint32_extension] = 403
  extensions[pb2.default_uint64_extension] = 404
  extensions[pb2.default_sint32_extension] = 405
  extensions[pb2.default_sint64_extension] = 406
  extensions[pb2.default_fixed32_extension] = 407
  extensions[pb2.default_fixed64_extension] = 408
  extensions[pb2.default_sfixed32_extension] = 409
  extensions[pb2.default_sfixed64_extension] = 410
  extensions[pb2.default_float_extension] = 411
  extensions[pb2.default_double_extension] = 412
  extensions[pb2.default_bool_extension] = False
  extensions[pb2.default_string_extension] = u'415'
  extensions[pb2.default_bytes_extension] = b'416'
  extensions[pb2.default_nested_enum_extension] = pb2.TestAllTypes.FOO
  extensions[pb2.default_foreign_enum_extension] = pb2.FOREIGN_FOO
  extensions[pb2.default_import_enum_extension] = import_pb2.IMPORT_FOO
  extensions[pb2.default_string_piece_extension] = u'424'
  extensions[pb2.default_cord_extension] = '425'

  extensions[pb2.oneof_uint32_extension] = 601
  extensions[pb2.oneof_nested_message_extension].bb = 602
  extensions[pb2.oneof_string_extension] = u'603'
  extensions[pb2.oneof_bytes_extension] = b'604'
def SetAllFieldsAndExtensions(message):
  """Sets every field and extension in the message to a unique value.

  Args:
    message: A unittest_pb2.TestFieldOrderings instance -- the message type
      that declares my_int/my_string/my_float and the my_extension_*
      extensions used here (the original docstring said TestAllExtensions,
      which does not match the fields assigned below).
  """
  message.my_int = 1
  message.my_string = 'foo'
  message.my_float = 1.0
  message.Extensions[unittest_pb2.my_extension_int] = 23
  message.Extensions[unittest_pb2.my_extension_string] = 'bar'
def ExpectAllFieldsAndExtensionsInOrder(serialized):
  """Ensures that serialized is the serialization we expect for a message
  filled with SetAllFieldsAndExtensions().  (Specifically, ensures that the
  serialization is in canonical, tag-number order).

  Raises:
    ValueError: If `serialized` does not match the expected byte string.
  """
  my_extension_int = unittest_pb2.my_extension_int
  my_extension_string = unittest_pb2.my_extension_string
  expected_strings = []
  message = unittest_pb2.TestFieldOrderings()
  # Serialize each field/extension separately, in increasing tag order,
  # then concatenate: that is the canonical wire layout.
  message.my_int = 1  # Field 1.
  expected_strings.append(message.SerializeToString())
  message.Clear()
  message.Extensions[my_extension_int] = 23  # Field 5.
  expected_strings.append(message.SerializeToString())
  message.Clear()
  message.my_string = 'foo'  # Field 11.
  expected_strings.append(message.SerializeToString())
  message.Clear()
  message.Extensions[my_extension_string] = 'bar'  # Field 50.
  expected_strings.append(message.SerializeToString())
  message.Clear()
  message.my_float = 1.0  # Appended last, i.e. the highest tag number.
  expected_strings.append(message.SerializeToString())
  message.Clear()
  expected = b''.join(expected_strings)

  if expected != serialized:
    raise ValueError('Expected %r, found %r' % (expected, serialized))
def ExpectAllFieldsSet(test_case, message):
  """Check all fields for correct values have after Set*Fields() is called.

  Args:
    test_case: A unittest.TestCase used to make the assertions.
    message: A TestAllTypes message previously filled by SetAllFields()
      (proto2) or its proto3 counterpart.
  """
  # NOTE(review): leading indentation was lost in this copy; the extent of
  # each `if IsProto2(message):` guard below was reconstructed so that every
  # access to proto2-only group fields stays guarded -- confirm against the
  # upstream protobuf test_util.py.

  # Presence checks for singular scalar fields.
  test_case.assertTrue(message.HasField('optional_int32'))
  test_case.assertTrue(message.HasField('optional_int64'))
  test_case.assertTrue(message.HasField('optional_uint32'))
  test_case.assertTrue(message.HasField('optional_uint64'))
  test_case.assertTrue(message.HasField('optional_sint32'))
  test_case.assertTrue(message.HasField('optional_sint64'))
  test_case.assertTrue(message.HasField('optional_fixed32'))
  test_case.assertTrue(message.HasField('optional_fixed64'))
  test_case.assertTrue(message.HasField('optional_sfixed32'))
  test_case.assertTrue(message.HasField('optional_sfixed64'))
  test_case.assertTrue(message.HasField('optional_float'))
  test_case.assertTrue(message.HasField('optional_double'))
  test_case.assertTrue(message.HasField('optional_bool'))
  test_case.assertTrue(message.HasField('optional_string'))
  test_case.assertTrue(message.HasField('optional_bytes'))

  if IsProto2(message):
    # Groups only exist in proto2, so these presence checks (which touch
    # message.optionalgroup) must be skipped for proto3 messages.
    test_case.assertTrue(message.HasField('optionalgroup'))
    test_case.assertTrue(message.HasField('optional_nested_message'))
    test_case.assertTrue(message.HasField('optional_foreign_message'))
    test_case.assertTrue(message.HasField('optional_import_message'))

    test_case.assertTrue(message.optionalgroup.HasField('a'))
    test_case.assertTrue(message.optional_nested_message.HasField('bb'))
    test_case.assertTrue(message.optional_foreign_message.HasField('c'))
    test_case.assertTrue(message.optional_import_message.HasField('d'))

  test_case.assertTrue(message.HasField('optional_nested_enum'))
  test_case.assertTrue(message.HasField('optional_foreign_enum'))
  if IsProto2(message):
    test_case.assertTrue(message.HasField('optional_import_enum'))

  test_case.assertTrue(message.HasField('optional_string_piece'))
  test_case.assertTrue(message.HasField('optional_cord'))

  # Values assigned by SetAllFields() follow the 1xx numbering scheme,
  # one value per field, matching field declaration order.
  test_case.assertEqual(101, message.optional_int32)
  test_case.assertEqual(102, message.optional_int64)
  test_case.assertEqual(103, message.optional_uint32)
  test_case.assertEqual(104, message.optional_uint64)
  test_case.assertEqual(105, message.optional_sint32)
  test_case.assertEqual(106, message.optional_sint64)
  test_case.assertEqual(107, message.optional_fixed32)
  test_case.assertEqual(108, message.optional_fixed64)
  test_case.assertEqual(109, message.optional_sfixed32)
  test_case.assertEqual(110, message.optional_sfixed64)
  test_case.assertEqual(111, message.optional_float)
  test_case.assertEqual(112, message.optional_double)
  test_case.assertEqual(True, message.optional_bool)
  test_case.assertEqual('115', message.optional_string)
  test_case.assertEqual(b'116', message.optional_bytes)

  if IsProto2(message):
    test_case.assertEqual(117, message.optionalgroup.a)
  test_case.assertEqual(118, message.optional_nested_message.bb)
  test_case.assertEqual(119, message.optional_foreign_message.c)
  test_case.assertEqual(120, message.optional_import_message.d)
  test_case.assertEqual(126, message.optional_public_import_message.e)
  test_case.assertEqual(127, message.optional_lazy_message.bb)

  test_case.assertEqual(unittest_pb2.TestAllTypes.BAZ,
                        message.optional_nested_enum)
  test_case.assertEqual(unittest_pb2.FOREIGN_BAZ,
                        message.optional_foreign_enum)
  if IsProto2(message):
    test_case.assertEqual(unittest_import_pb2.IMPORT_BAZ,
                          message.optional_import_enum)

  # -----------------------------------------------------------------
  # Repeated fields: SetAllFields() appends exactly two elements each.
  test_case.assertEqual(2, len(message.repeated_int32))
  test_case.assertEqual(2, len(message.repeated_int64))
  test_case.assertEqual(2, len(message.repeated_uint32))
  test_case.assertEqual(2, len(message.repeated_uint64))
  test_case.assertEqual(2, len(message.repeated_sint32))
  test_case.assertEqual(2, len(message.repeated_sint64))
  test_case.assertEqual(2, len(message.repeated_fixed32))
  test_case.assertEqual(2, len(message.repeated_fixed64))
  test_case.assertEqual(2, len(message.repeated_sfixed32))
  test_case.assertEqual(2, len(message.repeated_sfixed64))
  test_case.assertEqual(2, len(message.repeated_float))
  test_case.assertEqual(2, len(message.repeated_double))
  test_case.assertEqual(2, len(message.repeated_bool))
  test_case.assertEqual(2, len(message.repeated_string))
  test_case.assertEqual(2, len(message.repeated_bytes))

  if IsProto2(message):
    test_case.assertEqual(2, len(message.repeatedgroup))
  test_case.assertEqual(2, len(message.repeated_nested_message))
  test_case.assertEqual(2, len(message.repeated_foreign_message))
  test_case.assertEqual(2, len(message.repeated_import_message))
  test_case.assertEqual(2, len(message.repeated_nested_enum))
  test_case.assertEqual(2, len(message.repeated_foreign_enum))
  if IsProto2(message):
    test_case.assertEqual(2, len(message.repeated_import_enum))

  test_case.assertEqual(2, len(message.repeated_string_piece))
  test_case.assertEqual(2, len(message.repeated_cord))

  # First repeated element uses the 2xx numbering scheme.
  test_case.assertEqual(201, message.repeated_int32[0])
  test_case.assertEqual(202, message.repeated_int64[0])
  test_case.assertEqual(203, message.repeated_uint32[0])
  test_case.assertEqual(204, message.repeated_uint64[0])
  test_case.assertEqual(205, message.repeated_sint32[0])
  test_case.assertEqual(206, message.repeated_sint64[0])
  test_case.assertEqual(207, message.repeated_fixed32[0])
  test_case.assertEqual(208, message.repeated_fixed64[0])
  test_case.assertEqual(209, message.repeated_sfixed32[0])
  test_case.assertEqual(210, message.repeated_sfixed64[0])
  test_case.assertEqual(211, message.repeated_float[0])
  test_case.assertEqual(212, message.repeated_double[0])
  test_case.assertEqual(True, message.repeated_bool[0])
  test_case.assertEqual('215', message.repeated_string[0])
  test_case.assertEqual(b'216', message.repeated_bytes[0])

  if IsProto2(message):
    test_case.assertEqual(217, message.repeatedgroup[0].a)
  test_case.assertEqual(218, message.repeated_nested_message[0].bb)
  test_case.assertEqual(219, message.repeated_foreign_message[0].c)
  test_case.assertEqual(220, message.repeated_import_message[0].d)
  test_case.assertEqual(227, message.repeated_lazy_message[0].bb)

  test_case.assertEqual(unittest_pb2.TestAllTypes.BAR,
                        message.repeated_nested_enum[0])
  test_case.assertEqual(unittest_pb2.FOREIGN_BAR,
                        message.repeated_foreign_enum[0])
  if IsProto2(message):
    test_case.assertEqual(unittest_import_pb2.IMPORT_BAR,
                          message.repeated_import_enum[0])

  # Second repeated element uses the 3xx numbering scheme.
  test_case.assertEqual(301, message.repeated_int32[1])
  test_case.assertEqual(302, message.repeated_int64[1])
  test_case.assertEqual(303, message.repeated_uint32[1])
  test_case.assertEqual(304, message.repeated_uint64[1])
  test_case.assertEqual(305, message.repeated_sint32[1])
  test_case.assertEqual(306, message.repeated_sint64[1])
  test_case.assertEqual(307, message.repeated_fixed32[1])
  test_case.assertEqual(308, message.repeated_fixed64[1])
  test_case.assertEqual(309, message.repeated_sfixed32[1])
  test_case.assertEqual(310, message.repeated_sfixed64[1])
  test_case.assertEqual(311, message.repeated_float[1])
  test_case.assertEqual(312, message.repeated_double[1])
  test_case.assertEqual(False, message.repeated_bool[1])
  test_case.assertEqual('315', message.repeated_string[1])
  test_case.assertEqual(b'316', message.repeated_bytes[1])

  if IsProto2(message):
    test_case.assertEqual(317, message.repeatedgroup[1].a)
  test_case.assertEqual(318, message.repeated_nested_message[1].bb)
  test_case.assertEqual(319, message.repeated_foreign_message[1].c)
  test_case.assertEqual(320, message.repeated_import_message[1].d)
  test_case.assertEqual(327, message.repeated_lazy_message[1].bb)

  test_case.assertEqual(unittest_pb2.TestAllTypes.BAZ,
                        message.repeated_nested_enum[1])
  test_case.assertEqual(unittest_pb2.FOREIGN_BAZ,
                        message.repeated_foreign_enum[1])
  if IsProto2(message):
    test_case.assertEqual(unittest_import_pb2.IMPORT_BAZ,
                          message.repeated_import_enum[1])

  # -----------------------------------------------------------------
  if IsProto2(message):
    # Fields with explicit defaults (default_*) only exist in proto2;
    # SetAllFields() overrides them with the 4xx numbering scheme.
    test_case.assertTrue(message.HasField('default_int32'))
    test_case.assertTrue(message.HasField('default_int64'))
    test_case.assertTrue(message.HasField('default_uint32'))
    test_case.assertTrue(message.HasField('default_uint64'))
    test_case.assertTrue(message.HasField('default_sint32'))
    test_case.assertTrue(message.HasField('default_sint64'))
    test_case.assertTrue(message.HasField('default_fixed32'))
    test_case.assertTrue(message.HasField('default_fixed64'))
    test_case.assertTrue(message.HasField('default_sfixed32'))
    test_case.assertTrue(message.HasField('default_sfixed64'))
    test_case.assertTrue(message.HasField('default_float'))
    test_case.assertTrue(message.HasField('default_double'))
    test_case.assertTrue(message.HasField('default_bool'))
    test_case.assertTrue(message.HasField('default_string'))
    test_case.assertTrue(message.HasField('default_bytes'))

    test_case.assertTrue(message.HasField('default_nested_enum'))
    test_case.assertTrue(message.HasField('default_foreign_enum'))
    test_case.assertTrue(message.HasField('default_import_enum'))

    test_case.assertEqual(401, message.default_int32)
    test_case.assertEqual(402, message.default_int64)
    test_case.assertEqual(403, message.default_uint32)
    test_case.assertEqual(404, message.default_uint64)
    test_case.assertEqual(405, message.default_sint32)
    test_case.assertEqual(406, message.default_sint64)
    test_case.assertEqual(407, message.default_fixed32)
    test_case.assertEqual(408, message.default_fixed64)
    test_case.assertEqual(409, message.default_sfixed32)
    test_case.assertEqual(410, message.default_sfixed64)
    test_case.assertEqual(411, message.default_float)
    test_case.assertEqual(412, message.default_double)
    test_case.assertEqual(False, message.default_bool)
    test_case.assertEqual('415', message.default_string)
    test_case.assertEqual(b'416', message.default_bytes)

    test_case.assertEqual(unittest_pb2.TestAllTypes.FOO,
                          message.default_nested_enum)
    test_case.assertEqual(unittest_pb2.FOREIGN_FOO,
                          message.default_foreign_enum)
    test_case.assertEqual(unittest_import_pb2.IMPORT_FOO,
                          message.default_import_enum)
def GoldenFile(filename):
  """Finds the given golden file and returns a file object representing it.

  Args:
    filename: Golden file name, relative to the testdata directory.

  Returns:
    A file object opened in binary mode.

  Raises:
    RuntimeError: If the golden file cannot be located.
  """
  # Search up the directory tree looking for the C++ protobuf source code.
  path = '.'
  while os.path.exists(path):
    if os.path.exists(os.path.join(path, 'src/google/protobuf')):
      # Found it.  Load the golden file from the testdata directory.
      full_path = os.path.join(path, 'src/google/protobuf/testdata', filename)
      return open(full_path, 'rb')
    parent = os.path.join(path, '..')
    # Bug fix: stop at the filesystem root.  The previous code appended
    # '..' forever (the root's parent is the root itself, so the path never
    # stopped existing) and only terminated by accident once the path string
    # exceeded the OS path-length limit.
    if os.path.abspath(parent) == os.path.abspath(path):
      break
    path = parent

  # Search internally.
  path = '.'
  full_path = os.path.join(path, 'third_party/py/google/protobuf/testdata',
                           filename)
  if os.path.exists(full_path):
    # Found it.  Load the golden file from the testdata directory.
    return open(full_path, 'rb')

  raise RuntimeError(
      'Could not find golden files. This test must be run from within the '
      'protobuf source package so that it can read test data files from the '
      'C++ source tree.')
def GoldenFileData(filename):
  """Finds the given golden file and returns its contents."""
  golden = GoldenFile(filename)
  try:
    return golden.read()
  finally:
    # Always release the file handle, mirroring the context-manager form.
    golden.close()
def SetAllPackedFields(message):
  """Sets every field in the message to a unique value.

  Args:
    message: A TestPackedTypes instance.
  """
  # Each field gets a distinct (first, second) pair; extend() preserves the
  # same append order as the original per-field assignments.
  scalar_values = (
      ('packed_int32', [601, 701]),
      ('packed_int64', [602, 702]),
      ('packed_uint32', [603, 703]),
      ('packed_uint64', [604, 704]),
      ('packed_sint32', [605, 705]),
      ('packed_sint64', [606, 706]),
      ('packed_fixed32', [607, 707]),
      ('packed_fixed64', [608, 708]),
      ('packed_sfixed32', [609, 709]),
      ('packed_sfixed64', [610, 710]),
      ('packed_float', [611.0, 711.0]),
      ('packed_double', [612.0, 712.0]),
      ('packed_bool', [True, False]),
  )
  for field_name, values in scalar_values:
    getattr(message, field_name).extend(values)
  message.packed_enum.extend([unittest_pb2.FOREIGN_BAR,
                              unittest_pb2.FOREIGN_BAZ])
def SetAllPackedExtensions(message):
  """Sets every extension in the message to a unique value.

  Args:
    message: A unittest_pb2.TestPackedExtensions instance.
  """
  extensions = message.Extensions
  pb2 = unittest_pb2
  # Mirror SetAllPackedFields: one distinct value pair per packed extension.
  packed_values = (
      (pb2.packed_int32_extension, [601, 701]),
      (pb2.packed_int64_extension, [602, 702]),
      (pb2.packed_uint32_extension, [603, 703]),
      (pb2.packed_uint64_extension, [604, 704]),
      (pb2.packed_sint32_extension, [605, 705]),
      (pb2.packed_sint64_extension, [606, 706]),
      (pb2.packed_fixed32_extension, [607, 707]),
      (pb2.packed_fixed64_extension, [608, 708]),
      (pb2.packed_sfixed32_extension, [609, 709]),
      (pb2.packed_sfixed64_extension, [610, 710]),
      (pb2.packed_float_extension, [611.0, 711.0]),
      (pb2.packed_double_extension, [612.0, 712.0]),
      (pb2.packed_bool_extension, [True, False]),
      (pb2.packed_enum_extension, [unittest_pb2.FOREIGN_BAR,
                                   unittest_pb2.FOREIGN_BAZ]),
  )
  for extension, values in packed_values:
    extensions[extension].extend(values)
def SetAllUnpackedFields(message):
  """Sets every field in the message to a unique value.

  Args:
    message: A unittest_pb2.TestUnpackedTypes instance.
  """
  # Same value scheme as SetAllPackedFields, applied to the unpacked fields.
  scalar_values = (
      ('unpacked_int32', [601, 701]),
      ('unpacked_int64', [602, 702]),
      ('unpacked_uint32', [603, 703]),
      ('unpacked_uint64', [604, 704]),
      ('unpacked_sint32', [605, 705]),
      ('unpacked_sint64', [606, 706]),
      ('unpacked_fixed32', [607, 707]),
      ('unpacked_fixed64', [608, 708]),
      ('unpacked_sfixed32', [609, 709]),
      ('unpacked_sfixed64', [610, 710]),
      ('unpacked_float', [611.0, 711.0]),
      ('unpacked_double', [612.0, 712.0]),
      ('unpacked_bool', [True, False]),
  )
  for field_name, values in scalar_values:
    getattr(message, field_name).extend(values)
  message.unpacked_enum.extend([unittest_pb2.FOREIGN_BAR,
                                unittest_pb2.FOREIGN_BAZ])
| mit |
camradal/ansible | lib/ansible/modules/commands/expect.py | 7 | 7744 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Matt Martz <matt@sivel.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module release metadata consumed by Ansible's documentation/QA tooling.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}
DOCUMENTATION = '''
---
module: expect
version_added: 2.0
short_description: Executes a command and responds to prompts
description:
- The M(expect) module executes a command and responds to prompts
- The given command will be executed on all selected nodes. It will not be
processed through the shell, so variables like C($HOME) and operations
like C("<"), C(">"), C("|"), and C("&") will not work
options:
command:
description:
- the command module takes command to run.
required: true
creates:
description:
- a filename, when it already exists, this step will B(not) be run.
required: false
removes:
description:
- a filename, when it does not exist, this step will B(not) be run.
required: false
chdir:
description:
- cd into this directory before running the command
required: false
responses:
description:
- Mapping of expected string/regex and string to respond with. If the
response is a list, successive matches return successive
responses. List functionality is new in 2.1.
required: true
timeout:
description:
- Amount of time in seconds to wait for the expected strings
default: 30
echo:
description:
- Whether or not to echo out your response strings
default: false
requirements:
- python >= 2.6
- pexpect >= 3.3
notes:
- If you want to run a command through the shell (say you are using C(<),
C(>), C(|), etc), you must specify a shell in the command such as
C(/bin/bash -c "/path/to/something | grep else")
- The question, or key, under I(responses) is a python regex match. Case
insensitive searches are indicated with a prefix of C(?i)
- By default, if a question is encountered multiple times, it's string
response will be repeated. If you need different responses for successive
question matches, instead of a string response, use a list of strings as
the response. The list functionality is new in 2.1
author: "Matt Martz (@sivel)"
'''
EXAMPLES = '''
# Case insensitve password string match
- expect:
command: passwd username
responses:
(?i)password: "MySekretPa$$word"
# Generic question with multiple different responses
- expect:
command: /path/to/custom/command
responses:
Question:
- response1
- response2
- response3
'''
import datetime
import os
try:
import pexpect
HAS_PEXPECT = True
except ImportError:
HAS_PEXPECT = False
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
def response_closure(module, question, responses):
    """Return a pexpect event callback that yields successive responses.

    Args:
        module: The AnsibleModule, used to fail the task when the supplied
            responses run out.
        question: The prompt pattern the responses belong to (used only in
            the failure message).
        responses: List of replies; one is handed out per prompt match.

    Returns:
        A function suitable as a value in the pexpect ``events`` mapping.
    """
    resp_gen = (u'%s\n' % to_text(r).rstrip(u'\n') for r in responses)

    def wrapped(info):
        try:
            # Bug fix: generator.next() is Python 2 only and raised
            # AttributeError on Python 3; the builtin next() works on both.
            return next(resp_gen)
        except StopIteration:
            module.fail_json(msg="No remaining responses for '%s', "
                                 "output was '%s'" %
                                 (question,
                                  info['child_result_list'][-1]))

    return wrapped
def main():
    """Run a command via pexpect, answering its prompts from `responses`."""
    module = AnsibleModule(
        argument_spec=dict(
            command=dict(required=True),
            chdir=dict(),
            creates=dict(),
            removes=dict(),
            responses=dict(type='dict', required=True),
            timeout=dict(type='int', default=30),
            echo=dict(type='bool', default=False),
        )
    )

    if not HAS_PEXPECT:
        module.fail_json(msg='The pexpect python module is required')

    chdir = module.params['chdir']
    args = module.params['command']
    creates = module.params['creates']
    removes = module.params['removes']
    responses = module.params['responses']
    timeout = module.params['timeout']
    echo = module.params['echo']

    # Build the pexpect "events" mapping: prompt regex -> response.  A list
    # value becomes a closure handing out successive answers; a scalar is a
    # fixed, newline-terminated reply.
    events = dict()
    for key, value in responses.items():
        if isinstance(value, list):
            response = response_closure(module, key, value)
        else:
            response = u'%s\n' % to_text(value).rstrip(u'\n')

        # Bug fix: key.decode() crashed on Python 3 (text strings have no
        # .decode) and could raise UnicodeDecodeError for non-ASCII byte
        # keys on Python 2.  to_text() handles both string types safely.
        events[to_text(key)] = response

    if args.strip() == '':
        module.fail_json(rc=256, msg="no command given")

    if chdir:
        chdir = os.path.abspath(os.path.expanduser(chdir))
        os.chdir(chdir)

    if creates:
        # do not run the command if the line contains creates=filename
        # and the filename already exists. This allows idempotence
        # of command executions.
        v = os.path.expanduser(creates)
        if os.path.exists(v):
            module.exit_json(
                cmd=args,
                stdout="skipped, since %s exists" % v,
                changed=False,
                rc=0
            )

    if removes:
        # do not run the command if the line contains removes=filename
        # and the filename does not exist. This allows idempotence
        # of command executions.
        v = os.path.expanduser(removes)
        if not os.path.exists(v):
            module.exit_json(
                cmd=args,
                stdout="skipped, since %s does not exist" % v,
                changed=False,
                rc=0
            )

    startd = datetime.datetime.now()

    try:
        try:
            # Prefer pexpect.run from pexpect>=4
            out, rc = pexpect.run(args, timeout=timeout, withexitstatus=True,
                                  events=events, cwd=chdir, echo=echo,
                                  encoding='utf-8')
        except TypeError:
            # Use pexpect.runu in pexpect>=3.3,<4
            out, rc = pexpect.runu(args, timeout=timeout, withexitstatus=True,
                                   events=events, cwd=chdir, echo=echo)
    except (TypeError, AttributeError):
        e = get_exception()
        # This should catch all insufficient versions of pexpect
        # We deem them insufficient for their lack of ability to specify
        # to not echo responses via the run/runu functions, which would
        # potentially leak sensentive information
        module.fail_json(msg='Insufficient version of pexpect installed '
                             '(%s), this module requires pexpect>=3.3. '
                             'Error was %s' % (pexpect.__version__, e))
    except pexpect.ExceptionPexpect:
        e = get_exception()
        module.fail_json(msg='%s' % e)

    endd = datetime.datetime.now()
    delta = endd - startd

    # pexpect returns None output on some failure paths; normalize to ''.
    if out is None:
        out = ''

    ret = dict(
        cmd=args,
        stdout=out.rstrip('\r\n'),
        rc=rc,
        start=str(startd),
        end=str(endd),
        delta=str(delta),
        changed=True,
    )

    # A None exit status means pexpect gave up waiting: report a timeout.
    if rc is not None:
        module.exit_json(**ret)
    else:
        ret['msg'] = 'command exceeded timeout'
        module.fail_json(**ret)
if __name__ == '__main__':
main()
| gpl-3.0 |
pio-masaki/kernel_at1s0 | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
# Decode packed little-endian u32 config values from stdin and print them
# as space-separated "index=value" pairs (index rendered in hex).
i = 0
while True:
    chunk = sys.stdin.read(4)
    if not chunk:
        # Clean EOF on a 4-byte boundary: all records consumed.
        break
    elif len(chunk) != 4:
        # Truncated trailing record: terminate the output line, report, bail.
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(chunk)))
        sys.exit(1)

    if i:
        sys.stdout.write(" ")
    sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", chunk)[0]))
    i += 1

sys.stdout.write("\n")
| gpl-2.0 |
Philippe12/external_chromium_org | tools/perf/measurements/session_restore.py | 23 | 2150 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from measurements import startup
from metrics import cpu
from metrics import startup_metric
class SessionRestore(startup.Startup):
  """Performs a measurement of Chromium's Session restore performance.

  This test is meant to be run against a generated profile.
  This test inherits support for the --warm or --cold command line options -
  see startup.py for details.
  """

  def __init__(self):
    super(SessionRestore, self).__init__()
    # Keep the restored tabs open; closing them would defeat the measurement.
    self.close_tabs_before_run = False
    # Created lazily in DidStartBrowser, once a browser instance exists.
    self._cpu_metric = None

  def CustomizeBrowserOptions(self, options):
    # Force Chrome to restore the previous session on launch.
    super(SessionRestore, self).CustomizeBrowserOptions(options)
    options.AppendExtraBrowserArgs([
        '--restore-last-session'
    ])

  def CanRunForPage(self, page):
    # No matter how many pages in the pageset, just perform one test iteration.
    return page.page_set.pages.index(page) == 0

  def RunNavigateSteps(self, page, tab):
    # Overriden so that no page navigation occurs.
    pass

  def ValidatePageSet(self, page_set):
    # Reject any pageset that contains more than one WPR archive.
    wpr_archives = {}
    for page in page_set:
      wpr_archives[page_set.WprFilePathForPage(page)] = True
    if len(wpr_archives.keys()) > 1:
      raise Exception("Invalid pageset: more than 1 WPR archive found.: " +
                      ', '.join(wpr_archives.keys()))

  def DidStartBrowser(self, browser):
    # Start CPU sampling immediately so restore work done between launch
    # and page load is included in the measurement.
    self._cpu_metric = cpu.CpuMetric(browser)
    self._cpu_metric.Start(None, None)

  def MeasurePage(self, page, tab, results):
    # Wait for all tabs to finish loading.
    for i in xrange(len(tab.browser.tabs)):
      t = tab.browser.tabs[i]
      t.WaitForDocumentReadyStateToBeComplete()

    # Record CPU usage from browser start to when all pages have loaded.
    self._cpu_metric.Stop(None, None)
    self._cpu_metric.AddResults(tab, results, 'cpu_utilization')

    startup_metric.StartupMetric().AddResults(tab, results)

    # TODO(jeremy): Measure time to load - first, last and frontmost tab here.
av8ramit/tensorflow | tensorflow/python/keras/_impl/keras/optimizers.py | 4 | 25991 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Built-in optimizer classes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import six
from six.moves import zip # pylint: disable=redefined-builtin
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes as dtypes_module
from tensorflow.python.framework import ops
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras._impl.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer as tf_optimizer_module
def clip_norm(g, c, n):
  """Clip a tensor by norm.

  Arguments:
      g: gradient tensor to clip.
      c: clipping threshold; a non-positive value disables clipping.
      n: norm of the gradient tensor.

  Returns:
      The (conditionally) clipped gradient tensor.
  """
  if c <= 0:
    # Clipping disabled: hand the gradient back untouched.
    return g

  condition = n >= c
  then_expression = lambda: math_ops.scalar_mul(c / n, g)
  else_expression = lambda: g

  # saving the shape to avoid converting sparse tensor to dense
  if isinstance(g, ops.Tensor):
    g_shape = copy.copy(g.get_shape())
  elif isinstance(g, ops.IndexedSlices):
    g_shape = copy.copy(g.dense_shape)
  if condition.dtype != dtypes_module.bool:
    condition = math_ops.cast(condition, 'bool')
  g = control_flow_ops.cond(condition, then_expression, else_expression)
  if isinstance(g, ops.Tensor):
    g.set_shape(g_shape)
  elif isinstance(g, ops.IndexedSlices):
    g._dense_shape = g_shape  # pylint: disable=protected-access
  return g
class Optimizer(object):
  """Abstract optimizer base class.

  Note: this is the parent class of all optimizers, not an actual optimizer
  that can be used for training models.

  All Keras optimizers support the following keyword arguments:

      clipnorm: float >= 0. Gradients will be clipped
          when their L2 norm exceeds this value.
      clipvalue: float >= 0. Gradients will be clipped
          when their absolute value exceeds this value.
  """

  def __init__(self, **kwargs):
    allowed_kwargs = {'clipnorm', 'clipvalue'}
    for key in kwargs:
      if key not in allowed_kwargs:
        raise TypeError('Unexpected keyword argument '
                        'passed to optimizer: ' + str(key))
    self.__dict__.update(kwargs)
    self.updates = []
    self.weights = []

  def get_updates(self, loss, params):
    # Concrete subclasses build their parameter-update ops here.
    raise NotImplementedError

  def get_gradients(self, loss, params):
    """Returns gradients of `loss` w.r.t. `params`, clipped if configured."""
    grads = K.gradients(loss, params)
    if hasattr(self, 'clipnorm') and self.clipnorm > 0:
      # Global-norm clipping: scale every gradient by the same factor.
      norm = K.sqrt(sum(K.sum(K.square(g)) for g in grads))
      grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
    if hasattr(self, 'clipvalue') and self.clipvalue > 0:
      limit = self.clipvalue
      grads = [K.clip(g, -limit, limit) for g in grads]
    return grads

  def set_weights(self, weights):
    """Sets the weights of the optimizer, from Numpy arrays.

    Should only be called after computing the gradients
    (otherwise the optimizer has no weights).

    Arguments:
        weights: a list of Numpy arrays. The number
            of arrays and their shape must match
            number of the dimensions of the weights
            of the optimizer (i.e. it should match the
            output of `get_weights`).

    Raises:
        ValueError: in case of incompatible weight shapes.
    """
    params = self.weights
    current_values = K.batch_get_value(params)
    assignments = []
    for current, param, new_value in zip(current_values, params, weights):
      if current.shape != new_value.shape:
        raise ValueError(
            'Optimizer weight shape ' + str(current.shape) +
            ' not compatible with '
            'provided weight shape ' + str(new_value.shape))
      assignments.append((param, new_value))
    K.batch_set_value(assignments)

  def get_weights(self):
    """Returns the current value of the weights of the optimizer.

    Returns:
        A list of numpy arrays.
    """
    return K.batch_get_value(self.weights)

  def get_config(self):
    """Returns the optimizer configuration as a JSON-serializable dict."""
    config = {}
    for attr in ('clipnorm', 'clipvalue'):
      if hasattr(self, attr):
        config[attr] = getattr(self, attr)
    return config

  @classmethod
  def from_config(cls, config):
    """Instantiates an optimizer from its `get_config` output."""
    return cls(**config)
class SGD(Optimizer):
  """Stochastic gradient descent optimizer.

  Includes support for momentum,
  learning rate decay, and Nesterov momentum.

  Arguments:
      lr: float >= 0. Learning rate.
      momentum: float >= 0. Parameter that accelerates SGD
          in the relevant direction and dampens oscillations.
      decay: float >= 0. Learning rate decay over each update.
      nesterov: boolean. Whether to apply Nesterov momentum.
  """

  def __init__(self, lr=0.01, momentum=0., decay=0., nesterov=False, **kwargs):
    super(SGD, self).__init__(**kwargs)
    with K.name_scope(self.__class__.__name__):
      # `iterations` counts applied updates and drives the decay schedule.
      self.iterations = K.variable(0, dtype='int64', name='iterations')
      self.lr = K.variable(lr, name='lr')
      self.momentum = K.variable(momentum, name='momentum')
      self.decay = K.variable(decay, name='decay')
    # Python-float copy of `decay`; cheap to test when building updates,
    # unlike the backend variable above.
    self.initial_decay = decay
    self.nesterov = nesterov

  def get_updates(self, loss, params):
    """Builds and returns the list of ops applying one SGD step to `params`."""
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]

    lr = self.lr
    if self.initial_decay > 0:
      # Inverse-time decay: lr / (1 + decay * iterations).
      lr *= (1. /
             (1. + self.decay * K.cast(self.iterations, K.dtype(self.decay))))
    # momentum
    shapes = [K.int_shape(p) for p in params]
    moments = [K.zeros(shape) for shape in shapes]
    self.weights = [self.iterations] + moments
    for p, g, m in zip(params, grads, moments):
      v = self.momentum * m - lr * g  # velocity
      self.updates.append(K.update(m, v))

      if self.nesterov:
        # Nesterov momentum: look ahead along the updated velocity.
        new_p = p + self.momentum * v - lr * g
      else:
        new_p = p + v

      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)

      self.updates.append(K.update(p, new_p))
    return self.updates

  def get_config(self):
    """Returns the hyperparameters as a JSON-serializable dict."""
    config = {
        'lr': float(K.get_value(self.lr)),
        'momentum': float(K.get_value(self.momentum)),
        'decay': float(K.get_value(self.decay)),
        'nesterov': self.nesterov
    }
    base_config = super(SGD, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class RMSprop(Optimizer):
  """RMSProp optimizer.

  It is recommended to leave the parameters of this optimizer
  at their default values
  (except the learning rate, which can be freely tuned).

  This optimizer is usually a good choice for recurrent
  neural networks.

  Arguments:
      lr: float >= 0. Learning rate.
      rho: float >= 0. Decay factor of the moving average of squared grads.
      epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
      decay: float >= 0. Learning rate decay over each update.
  """

  def __init__(self, lr=0.001, rho=0.9, epsilon=None, decay=0., **kwargs):
    super(RMSprop, self).__init__(**kwargs)
    with K.name_scope(self.__class__.__name__):
      self.lr = K.variable(lr, name='lr')
      self.rho = K.variable(rho, name='rho')
      self.decay = K.variable(decay, name='decay')
      self.iterations = K.variable(0, dtype='int64', name='iterations')
    if epsilon is None:
      epsilon = K.epsilon()
    self.epsilon = epsilon
    # Python-float copy of `decay`, checked cheaply in get_updates.
    self.initial_decay = decay

  def get_updates(self, loss, params):
    """Builds the RMSProp update ops for `params`."""
    grads = self.get_gradients(loss, params)
    # One accumulator per parameter: exponential moving average of g^2.
    accumulators = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    self.weights = accumulators
    self.updates = [K.update_add(self.iterations, 1)]

    lr = self.lr
    if self.initial_decay > 0:
      # Inverse-time decay: lr / (1 + decay * iterations).
      lr *= (1. /
             (1. + self.decay * K.cast(self.iterations, K.dtype(self.decay))))

    for p, g, a in zip(params, grads, accumulators):
      # update accumulator
      new_a = self.rho * a + (1. - self.rho) * K.square(g)
      self.updates.append(K.update(a, new_a))
      # Normalize the step by the RMS of recent gradients.
      new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)

      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)

      self.updates.append(K.update(p, new_p))
    return self.updates

  def get_config(self):
    """Returns the hyperparameters as a JSON-serializable dict."""
    config = {
        'lr': float(K.get_value(self.lr)),
        'rho': float(K.get_value(self.rho)),
        'decay': float(K.get_value(self.decay)),
        'epsilon': self.epsilon
    }
    base_config = super(RMSprop, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Adagrad(Optimizer):
  """Adagrad optimizer.

  It is recommended to leave the parameters of this optimizer
  at their default values.

  Arguments:
      lr: float >= 0. Learning rate.
      epsilon: float >= 0. If `None`, defaults to `K.epsilon()`.
      decay: float >= 0. Learning rate decay over each update.
  """

  def __init__(self, lr=0.01, epsilon=None, decay=0., **kwargs):
    super(Adagrad, self).__init__(**kwargs)
    with K.name_scope(self.__class__.__name__):
      self.lr = K.variable(lr, name='lr')
      self.decay = K.variable(decay, name='decay')
      self.iterations = K.variable(0, dtype='int64', name='iterations')
    if epsilon is None:
      epsilon = K.epsilon()
    self.epsilon = epsilon
    # Plain-float copy so get_updates can cheaply test whether decay is on.
    self.initial_decay = decay

  def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    shapes = [K.int_shape(p) for p in params]
    # One monotonically growing sum of squared gradients per parameter.
    accumulators = [K.zeros(shape) for shape in shapes]
    self.weights = accumulators
    self.updates = [K.update_add(self.iterations, 1)]

    lr = self.lr
    if self.initial_decay > 0:
      # Inverse-time decay: lr / (1 + decay * iterations).
      lr *= (1. /
             (1. + self.decay * K.cast(self.iterations, K.dtype(self.decay))))

    for p, g, a in zip(params, grads, accumulators):
      new_a = a + K.square(g)  # update accumulator
      self.updates.append(K.update(a, new_a))
      # Effective per-parameter step shrinks as its accumulated squared
      # gradient grows (frequently-updated weights get smaller steps).
      new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)

      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)

      self.updates.append(K.update(p, new_p))
    return self.updates

  def get_config(self):
    config = {
        'lr': float(K.get_value(self.lr)),
        'decay': float(K.get_value(self.decay)),
        'epsilon': self.epsilon
    }
    base_config = super(Adagrad, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Adadelta(Optimizer):
  """Adadelta optimizer.

  It is recommended to leave the parameters of this optimizer
  at their default values.

  Arguments:
      lr: float >= 0. Learning rate.
          It is recommended to leave it at the default value.
      rho: float >= 0. Decay factor for both moving averages.
      epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
      decay: float >= 0. Learning rate decay over each update.
  """

  def __init__(self, lr=1.0, rho=0.95, epsilon=None, decay=0., **kwargs):
    super(Adadelta, self).__init__(**kwargs)
    with K.name_scope(self.__class__.__name__):
      self.lr = K.variable(lr, name='lr')
      self.decay = K.variable(decay, name='decay')
      self.iterations = K.variable(0, dtype='int64', name='iterations')
    if epsilon is None:
      epsilon = K.epsilon()
    # NOTE: unlike lr/decay, rho is stored as a plain float (not a backend
    # variable), so it cannot be changed after construction.
    self.rho = rho
    self.epsilon = epsilon
    self.initial_decay = decay

  def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    shapes = [K.int_shape(p) for p in params]
    # Moving average of squared gradients ...
    accumulators = [K.zeros(shape) for shape in shapes]
    # ... and of squared parameter updates (the "delta" accumulator).
    delta_accumulators = [K.zeros(shape) for shape in shapes]
    self.weights = accumulators + delta_accumulators
    self.updates = [K.update_add(self.iterations, 1)]

    lr = self.lr
    if self.initial_decay > 0:
      # Inverse-time decay: lr / (1 + decay * iterations).
      lr *= (1. /
             (1. + self.decay * K.cast(self.iterations, K.dtype(self.decay))))

    for p, g, a, d_a in zip(params, grads, accumulators, delta_accumulators):
      # update accumulator
      new_a = self.rho * a + (1. - self.rho) * K.square(g)
      self.updates.append(K.update(a, new_a))

      # use the new accumulator and the *old* delta_accumulator
      update = g * K.sqrt(d_a + self.epsilon) / K.sqrt(new_a + self.epsilon)
      new_p = p - lr * update

      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)

      self.updates.append(K.update(p, new_p))

      # update delta_accumulator with the step just taken.
      new_d_a = self.rho * d_a + (1 - self.rho) * K.square(update)
      self.updates.append(K.update(d_a, new_d_a))
    return self.updates

  def get_config(self):
    config = {
        'lr': float(K.get_value(self.lr)),
        'rho': self.rho,
        'decay': float(K.get_value(self.decay)),
        'epsilon': self.epsilon
    }
    base_config = super(Adadelta, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Adam(Optimizer):
  """Adam optimizer.

  Default parameters follow those provided in the original paper.

  Arguments:
      lr: float >= 0. Learning rate.
      beta_1: float, 0 < beta < 1. Generally close to 1.
      beta_2: float, 0 < beta < 1. Generally close to 1.
      epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
      decay: float >= 0. Learning rate decay over each update.
      amsgrad: boolean. Whether to apply the AMSGrad variant of this
          algorithm from the paper "On the Convergence of Adam and
          Beyond".
  """

  def __init__(self,
               lr=0.001,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=None,
               decay=0.,
               amsgrad=False,
               **kwargs):
    super(Adam, self).__init__(**kwargs)
    with K.name_scope(self.__class__.__name__):
      self.iterations = K.variable(0, dtype='int64', name='iterations')
      self.lr = K.variable(lr, name='lr')
      self.beta_1 = K.variable(beta_1, name='beta_1')
      self.beta_2 = K.variable(beta_2, name='beta_2')
      self.decay = K.variable(decay, name='decay')
    if epsilon is None:
      epsilon = K.epsilon()
    self.epsilon = epsilon
    self.initial_decay = decay
    self.amsgrad = amsgrad

  def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]

    lr = self.lr
    if self.initial_decay > 0:
      # Inverse-time decay: lr / (1 + decay * iterations).
      lr *= (1. /
             (1. + self.decay * K.cast(self.iterations, K.dtype(self.decay))))

    # t is 1-based; lr_t folds both bias corrections into the step size.
    t = K.cast(self.iterations, K.floatx()) + 1
    lr_t = lr * (
        K.sqrt(1. - K.pow(self.beta_2, t)) / (1. - K.pow(self.beta_1, t)))

    # First (m) and second (v) moment estimates, one per parameter.
    ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    if self.amsgrad:
      vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    else:
      # Dummy placeholders so self.weights has the same structure whether
      # or not AMSGrad is enabled.
      vhats = [K.zeros(1) for _ in params]
    self.weights = [self.iterations] + ms + vs + vhats

    for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
      m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
      v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
      if self.amsgrad:
        # AMSGrad: normalize by the running maximum of v_t instead of v_t.
        vhat_t = K.maximum(vhat, v_t)
        p_t = p - lr_t * m_t / (K.sqrt(vhat_t) + self.epsilon)
        self.updates.append(K.update(vhat, vhat_t))
      else:
        p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)

      self.updates.append(K.update(m, m_t))
      self.updates.append(K.update(v, v_t))
      new_p = p_t

      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)

      self.updates.append(K.update(p, new_p))
    return self.updates

  def get_config(self):
    config = {
        'lr': float(K.get_value(self.lr)),
        'beta_1': float(K.get_value(self.beta_1)),
        'beta_2': float(K.get_value(self.beta_2)),
        'decay': float(K.get_value(self.decay)),
        'epsilon': self.epsilon,
        'amsgrad': self.amsgrad
    }
    base_config = super(Adam, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Adamax(Optimizer):
  """Adamax optimizer from Adam paper's Section 7.

  It is a variant of Adam based on the infinity norm.
  Default parameters follow those provided in the paper.

  Arguments:
      lr: float >= 0. Learning rate.
      beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.
      epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
      decay: float >= 0. Learning rate decay over each update.
  """

  def __init__(self,
               lr=0.002,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=None,
               decay=0.,
               **kwargs):
    super(Adamax, self).__init__(**kwargs)
    with K.name_scope(self.__class__.__name__):
      self.iterations = K.variable(0, dtype='int64', name='iterations')
      self.lr = K.variable(lr, name='lr')
      self.beta_1 = K.variable(beta_1, name='beta_1')
      self.beta_2 = K.variable(beta_2, name='beta_2')
      self.decay = K.variable(decay, name='decay')
    if epsilon is None:
      epsilon = K.epsilon()
    self.epsilon = epsilon
    self.initial_decay = decay

  def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]

    lr = self.lr
    if self.initial_decay > 0:
      # Inverse-time decay: lr / (1 + decay * iterations).
      lr *= (1. /
             (1. + self.decay * K.cast(self.iterations, K.dtype(self.decay))))

    # Only the first moment needs bias correction; the infinity norm does not.
    t = K.cast(self.iterations, K.floatx()) + 1
    lr_t = lr / (1. - K.pow(self.beta_1, t))

    shapes = [K.int_shape(p) for p in params]
    # zero init of 1st moment
    ms = [K.zeros(shape) for shape in shapes]
    # zero init of exponentially weighted infinity norm
    us = [K.zeros(shape) for shape in shapes]
    self.weights = [self.iterations] + ms + us

    for p, g, m, u in zip(params, grads, ms, us):
      m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
      # Infinity-norm update: elementwise max of decayed norm and |g|.
      u_t = K.maximum(self.beta_2 * u, K.abs(g))
      p_t = p - lr_t * m_t / (u_t + self.epsilon)

      self.updates.append(K.update(m, m_t))
      self.updates.append(K.update(u, u_t))
      new_p = p_t

      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)

      self.updates.append(K.update(p, new_p))
    return self.updates

  def get_config(self):
    config = {
        'lr': float(K.get_value(self.lr)),
        'beta_1': float(K.get_value(self.beta_1)),
        'beta_2': float(K.get_value(self.beta_2)),
        'decay': float(K.get_value(self.decay)),
        'epsilon': self.epsilon
    }
    base_config = super(Adamax, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Nadam(Optimizer):
  """Nesterov Adam optimizer.

  Much like Adam is essentially RMSprop with momentum,
  Nadam is Adam RMSprop with Nesterov momentum.

  Default parameters follow those provided in the paper.
  It is recommended to leave the parameters of this optimizer
  at their default values.

  Arguments:
      lr: float >= 0. Learning rate.
      beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.
      epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
      schedule_decay: float. Decay rate of the warming momentum schedule.
  """

  def __init__(self,
               lr=0.002,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=None,
               schedule_decay=0.004,
               **kwargs):
    super(Nadam, self).__init__(**kwargs)
    with K.name_scope(self.__class__.__name__):
      self.iterations = K.variable(0, dtype='int64', name='iterations')
      # Running product of the momentum schedule, mu_1 * mu_2 * ... * mu_t.
      self.m_schedule = K.variable(1., name='m_schedule')
      self.lr = K.variable(lr, name='lr')
      self.beta_1 = K.variable(beta_1, name='beta_1')
      self.beta_2 = K.variable(beta_2, name='beta_2')
    if epsilon is None:
      epsilon = K.epsilon()
    self.epsilon = epsilon
    self.schedule_decay = schedule_decay

  def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]

    t = K.cast(self.iterations, K.floatx()) + 1

    # Due to the recommendations in [2], i.e. warming momentum schedule:
    # momentum ramps up from ~0.5*beta_1 toward beta_1 over training.
    momentum_cache_t = self.beta_1 * (
        1. - 0.5 * (K.pow(K.cast_to_floatx(0.96), t * self.schedule_decay)))
    momentum_cache_t_1 = self.beta_1 * (
        1. - 0.5 *
        (K.pow(K.cast_to_floatx(0.96), (t + 1) * self.schedule_decay)))
    m_schedule_new = self.m_schedule * momentum_cache_t
    m_schedule_next = self.m_schedule * momentum_cache_t * momentum_cache_t_1
    # NOTE: appended as a (variable, new_value) tuple rather than K.update;
    # the backend accepts both forms in an updates list.
    self.updates.append((self.m_schedule, m_schedule_new))

    shapes = [K.int_shape(p) for p in params]
    ms = [K.zeros(shape) for shape in shapes]
    vs = [K.zeros(shape) for shape in shapes]
    self.weights = [self.iterations] + ms + vs

    for p, g, m, v in zip(params, grads, ms, vs):
      # the following equations given in [1]
      g_prime = g / (1. - m_schedule_new)
      m_t = self.beta_1 * m + (1. - self.beta_1) * g
      m_t_prime = m_t / (1. - m_schedule_next)
      v_t = self.beta_2 * v + (1. - self.beta_2) * K.square(g)
      v_t_prime = v_t / (1. - K.pow(self.beta_2, t))
      # Nesterov look-ahead: blend the bias-corrected gradient with the
      # bias-corrected first moment using next step's momentum.
      m_t_bar = (
          1. - momentum_cache_t) * g_prime + momentum_cache_t_1 * m_t_prime

      self.updates.append(K.update(m, m_t))
      self.updates.append(K.update(v, v_t))

      p_t = p - self.lr * m_t_bar / (K.sqrt(v_t_prime) + self.epsilon)
      new_p = p_t

      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)

      self.updates.append(K.update(p, new_p))
    return self.updates

  def get_config(self):
    config = {
        'lr': float(K.get_value(self.lr)),
        'beta_1': float(K.get_value(self.beta_1)),
        'beta_2': float(K.get_value(self.beta_2)),
        'epsilon': self.epsilon,
        'schedule_decay': self.schedule_decay
    }
    base_config = super(Nadam, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class TFOptimizer(Optimizer):
  """Wrapper class for native TensorFlow optimizers.

  Delegates gradient computation and application to the wrapped TF
  optimizer; serialization (get_config/from_config) and the Keras
  `weights` property are intentionally unsupported.
  """

  def __init__(self, optimizer):  # pylint: disable=super-init-not-called
    self.optimizer = optimizer
    with K.name_scope(self.__class__.__name__):
      if context.in_graph_mode():
        # The iteration counter is only created when building a graph;
        # eager mode has no persistent variable graph here.
        self.iterations = K.variable(0, dtype='int64', name='iterations')

  def apply_gradients(self, grads):
    self.optimizer.apply_gradients(grads)

  def get_grads(self, loss, params):
    return self.optimizer.compute_gradients(loss, params)

  def get_updates(self, loss, params):
    grads = self.optimizer.compute_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]
    # Passing global_step lets the TF optimizer honor its own schedules.
    opt_update = self.optimizer.apply_gradients(
        grads, global_step=self.iterations)
    self.updates.append(opt_update)
    return self.updates

  @property
  def weights(self):
    raise NotImplementedError

  def get_config(self):
    raise NotImplementedError

  def from_config(self, config):
    raise NotImplementedError
# Aliases: lowercase names matched (case-insensitively) by `deserialize`/`get`.
sgd = SGD
rmsprop = RMSprop
adagrad = Adagrad
adadelta = Adadelta
adam = Adam
adamax = Adamax
nadam = Nadam
def serialize(optimizer):
  """Serialize a Keras Optimizer into a JSON-compatible config structure."""
  return serialize_keras_object(optimizer)
def deserialize(config, custom_objects=None):
  """Inverse of the `serialize` function.

  Arguments:
      config: Optimizer configuration dictionary.
      custom_objects: Optional dictionary mapping
          names (strings) to custom objects
          (classes and functions)
          to be considered during deserialization.

  Returns:
      A Keras Optimizer instance.
  """
  all_classes = {
      'sgd': SGD,
      'rmsprop': RMSprop,
      'adagrad': Adagrad,
      'adadelta': Adadelta,
      'adam': Adam,
      'adamax': Adamax,
      'nadam': Nadam,
      'tfoptimizer': TFOptimizer,
  }
  # Make deserialization case-insensitive for built-in optimizers.
  # (Custom optimizer names are left untouched.)
  lowered = config['class_name'].lower()
  if lowered in all_classes:
    config['class_name'] = lowered
  return deserialize_keras_object(
      config,
      module_objects=all_classes,
      custom_objects=custom_objects,
      printable_module_name='optimizer')
def get(identifier):
  """Retrieves a Keras Optimizer instance.

  Arguments:
      identifier: Optimizer identifier, one of
          - String: name of an optimizer
          - Dictionary: configuration dictionary.
          - Keras Optimizer instance (it will be returned unchanged).
          - TensorFlow Optimizer instance
            (it will be wrapped as a Keras Optimizer).

  Returns:
      A Keras Optimizer instance.

  Raises:
      ValueError: If `identifier` cannot be interpreted.
  """
  # Wrap TF optimizer instances
  if isinstance(identifier, tf_optimizer_module.Optimizer):
    return TFOptimizer(identifier)
  if isinstance(identifier, dict):
    return deserialize(identifier)
  if isinstance(identifier, six.string_types):
    # A bare name selects a built-in optimizer with default arguments.
    return deserialize({'class_name': str(identifier), 'config': {}})
  if isinstance(identifier, Optimizer):
    return identifier
  raise ValueError('Could not interpret optimizer identifier:', identifier)
| apache-2.0 |
BarcampBangalore/Barcamp-Bangalore-Android-App | gcm_flask/flask/config.py | 50 | 6150 | # -*- coding: utf-8 -*-
"""
flask.config
~~~~~~~~~~~~
Implements the configuration related objects.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import imp
import os
import errno
from werkzeug.utils import import_string
class ConfigAttribute(object):
    """Descriptor that forwards an attribute to ``obj.config``.

    Reading the attribute looks up ``self.__name__`` in the owner's
    ``config`` mapping (passing the value through ``get_converter``
    when one was supplied); writing stores into the same mapping.
    """

    def __init__(self, name, get_converter=None):
        self.__name__ = name
        self.get_converter = get_converter

    def __get__(self, obj, type=None):
        # Accessed on the class itself: return the descriptor unchanged.
        if obj is None:
            return self
        value = obj.config[self.__name__]
        if self.get_converter is None:
            return value
        return self.get_converter(value)

    def __set__(self, obj, value):
        obj.config[self.__name__] = value
class Config(dict):
    """Works exactly like a dict but provides ways to fill it from files
    or special dictionaries.  There are two common patterns to populate the
    config.

    Either you can fill the config from a config file::

        app.config.from_pyfile('yourconfig.cfg')

    Or alternatively you can define the configuration options in the
    module that calls :meth:`from_object` or provide an import path to
    a module that should be loaded.  It is also possible to tell it to
    use the same module and with that provide the configuration values
    just before the call::

        DEBUG = True
        SECRET_KEY = 'development key'
        app.config.from_object(__name__)

    In both cases (loading from any Python file or loading from modules),
    only uppercase keys are added to the config.  This makes it possible to use
    lowercase values in the config file for temporary values that are not added
    to the config or to define the config keys in the same file that implements
    the application.

    Probably the most interesting way to load configurations is from an
    environment variable pointing to a file::

        app.config.from_envvar('YOURAPPLICATION_SETTINGS')

    In this case before launching the application you have to set this
    environment variable to the file you want to use.  On Linux and OS X
    use the export statement::

        export YOURAPPLICATION_SETTINGS='/path/to/config/file'

    On windows use `set` instead.

    :param root_path: path to which files are read relative from.  When the
                      config object is created by the application, this is
                      the application's :attr:`~flask.Flask.root_path`.
    :param defaults: an optional dictionary of default values
    """

    def __init__(self, root_path, defaults=None):
        dict.__init__(self, defaults or {})
        self.root_path = root_path

    def from_envvar(self, variable_name, silent=False):
        """Loads a configuration from an environment variable pointing to
        a configuration file.  This is basically just a shortcut with nicer
        error messages for this line of code::

            app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])

        :param variable_name: name of the environment variable
        :param silent: set to `True` if you want silent failure for missing
                       files.
        :return: bool. `True` if able to load config, `False` otherwise.
        """
        rv = os.environ.get(variable_name)
        if not rv:
            if silent:
                return False
            raise RuntimeError('The environment variable %r is not set '
                               'and as such configuration could not be '
                               'loaded.  Set this variable and make it '
                               'point to a configuration file' %
                               variable_name)
        return self.from_pyfile(rv, silent=silent)

    def from_pyfile(self, filename, silent=False):
        """Updates the values in the config from a Python file.  This function
        behaves as if the file was imported as module with the
        :meth:`from_object` function.

        :param filename: the filename of the config.  This can either be an
                         absolute filename or a filename relative to the
                         root path.
        :param silent: set to `True` if you want silent failure for missing
                       files.

        .. versionadded:: 0.7
           `silent` parameter.
        """
        filename = os.path.join(self.root_path, filename)
        # Execute the file inside a fresh module namespace so only the
        # names it defines are visible to from_object below.
        d = imp.new_module('config')
        d.__file__ = filename
        try:
            execfile(filename, d.__dict__)
        except IOError, e:
            # With silent=True a missing file (or a directory) is ignored;
            # other I/O errors are re-raised with a clearer message.
            if silent and e.errno in (errno.ENOENT, errno.EISDIR):
                return False
            e.strerror = 'Unable to load configuration file (%s)' % e.strerror
            raise
        self.from_object(d)
        return True

    def from_object(self, obj):
        """Updates the values from the given object.  An object can be of one
        of the following two types:

        -   a string: in this case the object with that name will be imported
        -   an actual object reference: that object is used directly

        Objects are usually either modules or classes.

        Just the uppercase variables in that object are stored in the config.
        Example usage::

            app.config.from_object('yourapplication.default_config')
            from yourapplication import default_config
            app.config.from_object(default_config)

        You should not use this function to load the actual configuration but
        rather configuration defaults.  The actual config should be loaded
        with :meth:`from_pyfile` and ideally from a location not within the
        package because the package might be installed system wide.

        :param obj: an import name or object
        """
        if isinstance(obj, basestring):
            obj = import_string(obj)
        # Only uppercase names are treated as configuration keys.
        for key in dir(obj):
            if key.isupper():
                self[key] = getattr(obj, key)

    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self))
| apache-2.0 |
foodszhang/kbengine | kbe/src/lib/python/Lib/test/test_email/test_message.py | 72 | 26378 | import unittest
import textwrap
from email import policy, message_from_string
from email.message import EmailMessage, MIMEPart
from test.test_email import TestEmailBase, parameterize
# Helper.
def first(iterable):
    """Return the first item of *iterable* that is not None, else None."""
    for item in iterable:
        if item is not None:
            return item
    return None
class Test(TestEmailBase):

    policy = policy.default

    def test_error_on_setitem_if_max_count_exceeded(self):
        # 'To' has max_count == 1 under the default policy, so assigning a
        # second value must raise rather than silently append a header.
        m = self._str_msg("")
        m['To'] = 'abc@xyz'
        with self.assertRaises(ValueError):
            m['To'] = 'xyz@abc'

    def test_rfc2043_auto_decoded_and_emailmessage_used(self):
        # With the default (non-compat32) policy, RFC 2047 encoded words in
        # headers are decoded transparently and the parser produces
        # EmailMessage instances instead of legacy Message objects.
        m = message_from_string(textwrap.dedent("""\
            Subject: Ayons asperges pour le =?utf-8?q?d=C3=A9jeuner?=
            From: =?utf-8?q?Pep=C3=A9?= Le Pew <pepe@example.com>
            To: "Penelope Pussycat" <"penelope@example.com">
            MIME-Version: 1.0
            Content-Type: text/plain; charset="utf-8"

            sample text
            """), policy=policy.default)
        self.assertEqual(m['subject'], "Ayons asperges pour le déjeuner")
        self.assertEqual(m['from'], "Pepé Le Pew <pepe@example.com>")
        self.assertIsInstance(m, EmailMessage)
@parameterize
class TestEmailMessageBase:
policy = policy.default
# The first argument is a triple (related, html, plain) of indices into the
# list returned by 'walk' called on a Message constructed from the third.
# The indices indicate which part should match the corresponding part-type
# when passed to get_body (ie: the "first" part of that type in the
# message). The second argument is a list of indices into the 'walk' list
# of the attachments that should be returned by a call to
# 'iter_attachments'. The third argument is a list of indices into 'walk'
# that should be returned by a call to 'iter_parts'. Note that the first
# item returned by 'walk' is the Message itself.
message_params = {
'empty_message': (
(None, None, 0),
(),
(),
""),
'non_mime_plain': (
(None, None, 0),
(),
(),
textwrap.dedent("""\
To: foo@example.com
simple text body
""")),
'mime_non_text': (
(None, None, None),
(),
(),
textwrap.dedent("""\
To: foo@example.com
MIME-Version: 1.0
Content-Type: image/jpg
bogus body.
""")),
'plain_html_alternative': (
(None, 2, 1),
(),
(1, 2),
textwrap.dedent("""\
To: foo@example.com
MIME-Version: 1.0
Content-Type: multipart/alternative; boundary="==="
preamble
--===
Content-Type: text/plain
simple body
--===
Content-Type: text/html
<p>simple body</p>
--===--
""")),
'plain_html_mixed': (
(None, 2, 1),
(),
(1, 2),
textwrap.dedent("""\
To: foo@example.com
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="==="
preamble
--===
Content-Type: text/plain
simple body
--===
Content-Type: text/html
<p>simple body</p>
--===--
""")),
'plain_html_attachment_mixed': (
(None, None, 1),
(2,),
(1, 2),
textwrap.dedent("""\
To: foo@example.com
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="==="
--===
Content-Type: text/plain
simple body
--===
Content-Type: text/html
Content-Disposition: attachment
<p>simple body</p>
--===--
""")),
'html_text_attachment_mixed': (
(None, 2, None),
(1,),
(1, 2),
textwrap.dedent("""\
To: foo@example.com
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="==="
--===
Content-Type: text/plain
Content-Disposition: AtTaChment
simple body
--===
Content-Type: text/html
<p>simple body</p>
--===--
""")),
'html_text_attachment_inline_mixed': (
(None, 2, 1),
(),
(1, 2),
textwrap.dedent("""\
To: foo@example.com
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="==="
--===
Content-Type: text/plain
Content-Disposition: InLine
simple body
--===
Content-Type: text/html
Content-Disposition: inline
<p>simple body</p>
--===--
""")),
# RFC 2387
'related': (
(0, 1, None),
(2,),
(1, 2),
textwrap.dedent("""\
To: foo@example.com
MIME-Version: 1.0
Content-Type: multipart/related; boundary="==="; type=text/html
--===
Content-Type: text/html
<p>simple body</p>
--===
Content-Type: image/jpg
Content-ID: <image1>
bogus data
--===--
""")),
# This message structure will probably never be seen in the wild, but
# it proves we distinguish between text parts based on 'start'. The
# content would not, of course, actually work :)
'related_with_start': (
(0, 2, None),
(1,),
(1, 2),
textwrap.dedent("""\
To: foo@example.com
MIME-Version: 1.0
Content-Type: multipart/related; boundary="==="; type=text/html;
start="<body>"
--===
Content-Type: text/html
Content-ID: <include>
useless text
--===
Content-Type: text/html
Content-ID: <body>
<p>simple body</p>
<!--#include file="<include>"-->
--===--
""")),
'mixed_alternative_plain_related': (
(3, 4, 2),
(6, 7),
(1, 6, 7),
textwrap.dedent("""\
To: foo@example.com
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="==="
--===
Content-Type: multipart/alternative; boundary="+++"
--+++
Content-Type: text/plain
simple body
--+++
Content-Type: multipart/related; boundary="___"
--___
Content-Type: text/html
<p>simple body</p>
--___
Content-Type: image/jpg
Content-ID: <image1@cid>
bogus jpg body
--___--
--+++--
--===
Content-Type: image/jpg
Content-Disposition: attachment
bogus jpg body
--===
Content-Type: image/jpg
Content-Disposition: AttacHmenT
another bogus jpg body
--===--
""")),
# This structure suggested by Stephen J. Turnbull...may not exist/be
# supported in the wild, but we want to support it.
'mixed_related_alternative_plain_html': (
(1, 4, 3),
(6, 7),
(1, 6, 7),
textwrap.dedent("""\
To: foo@example.com
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="==="
--===
Content-Type: multipart/related; boundary="+++"
--+++
Content-Type: multipart/alternative; boundary="___"
--___
Content-Type: text/plain
simple body
--___
Content-Type: text/html
<p>simple body</p>
--___--
--+++
Content-Type: image/jpg
Content-ID: <image1@cid>
bogus jpg body
--+++--
--===
Content-Type: image/jpg
Content-Disposition: attachment
bogus jpg body
--===
Content-Type: image/jpg
Content-Disposition: attachment
another bogus jpg body
--===--
""")),
# Same thing, but proving we only look at the root part, which is the
# first one if there isn't any start parameter. That is, this is a
# broken related.
'mixed_related_alternative_plain_html_wrong_order': (
(1, None, None),
(6, 7),
(1, 6, 7),
textwrap.dedent("""\
To: foo@example.com
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="==="
--===
Content-Type: multipart/related; boundary="+++"
--+++
Content-Type: image/jpg
Content-ID: <image1@cid>
bogus jpg body
--+++
Content-Type: multipart/alternative; boundary="___"
--___
Content-Type: text/plain
simple body
--___
Content-Type: text/html
<p>simple body</p>
--___--
--+++--
--===
Content-Type: image/jpg
Content-Disposition: attachment
bogus jpg body
--===
Content-Type: image/jpg
Content-Disposition: attachment
another bogus jpg body
--===--
""")),
'message_rfc822': (
(None, None, None),
(),
(),
textwrap.dedent("""\
To: foo@example.com
MIME-Version: 1.0
Content-Type: message/rfc822
To: bar@example.com
From: robot@examp.com
this is a message body.
""")),
'mixed_text_message_rfc822': (
(None, None, 1),
(2,),
(1, 2),
textwrap.dedent("""\
To: foo@example.com
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="==="
--===
Content-Type: text/plain
Your message has bounced, ser.
--===
Content-Type: message/rfc822
To: bar@example.com
From: robot@examp.com
this is a message body.
--===--
""")),
}
def message_as_get_body(self, body_parts, attachments, parts, msg):
m = self._str_msg(msg)
allparts = list(m.walk())
expected = [None if n is None else allparts[n] for n in body_parts]
related = 0; html = 1; plain = 2
self.assertEqual(m.get_body(), first(expected))
self.assertEqual(m.get_body(preferencelist=(
'related', 'html', 'plain')),
first(expected))
self.assertEqual(m.get_body(preferencelist=('related', 'html')),
first(expected[related:html+1]))
self.assertEqual(m.get_body(preferencelist=('related', 'plain')),
first([expected[related], expected[plain]]))
self.assertEqual(m.get_body(preferencelist=('html', 'plain')),
first(expected[html:plain+1]))
self.assertEqual(m.get_body(preferencelist=['related']),
expected[related])
self.assertEqual(m.get_body(preferencelist=['html']), expected[html])
self.assertEqual(m.get_body(preferencelist=['plain']), expected[plain])
self.assertEqual(m.get_body(preferencelist=('plain', 'html')),
first(expected[plain:html-1:-1]))
self.assertEqual(m.get_body(preferencelist=('plain', 'related')),
first([expected[plain], expected[related]]))
self.assertEqual(m.get_body(preferencelist=('html', 'related')),
first(expected[html::-1]))
self.assertEqual(m.get_body(preferencelist=('plain', 'html', 'related')),
first(expected[::-1]))
self.assertEqual(m.get_body(preferencelist=('html', 'plain', 'related')),
first([expected[html],
expected[plain],
expected[related]]))
def message_as_iter_attachment(self, body_parts, attachments, parts, msg):
m = self._str_msg(msg)
allparts = list(m.walk())
attachments = [allparts[n] for n in attachments]
self.assertEqual(list(m.iter_attachments()), attachments)
def message_as_iter_parts(self, body_parts, attachments, parts, msg):
m = self._str_msg(msg)
allparts = list(m.walk())
parts = [allparts[n] for n in parts]
self.assertEqual(list(m.iter_parts()), parts)
class _TestContentManager:
def get_content(self, msg, *args, **kw):
return msg, args, kw
def set_content(self, msg, *args, **kw):
self.msg = msg
self.args = args
self.kw = kw
def test_get_content_with_cm(self):
m = self._str_msg('')
cm = self._TestContentManager()
self.assertEqual(m.get_content(content_manager=cm), (m, (), {}))
msg, args, kw = m.get_content('foo', content_manager=cm, bar=1, k=2)
self.assertEqual(msg, m)
self.assertEqual(args, ('foo',))
self.assertEqual(kw, dict(bar=1, k=2))
def test_get_content_default_cm_comes_from_policy(self):
p = policy.default.clone(content_manager=self._TestContentManager())
m = self._str_msg('', policy=p)
self.assertEqual(m.get_content(), (m, (), {}))
msg, args, kw = m.get_content('foo', bar=1, k=2)
self.assertEqual(msg, m)
self.assertEqual(args, ('foo',))
self.assertEqual(kw, dict(bar=1, k=2))
def test_set_content_with_cm(self):
m = self._str_msg('')
cm = self._TestContentManager()
m.set_content(content_manager=cm)
self.assertEqual(cm.msg, m)
self.assertEqual(cm.args, ())
self.assertEqual(cm.kw, {})
m.set_content('foo', content_manager=cm, bar=1, k=2)
self.assertEqual(cm.msg, m)
self.assertEqual(cm.args, ('foo',))
self.assertEqual(cm.kw, dict(bar=1, k=2))
def test_set_content_default_cm_comes_from_policy(self):
cm = self._TestContentManager()
p = policy.default.clone(content_manager=cm)
m = self._str_msg('', policy=p)
m.set_content()
self.assertEqual(cm.msg, m)
self.assertEqual(cm.args, ())
self.assertEqual(cm.kw, {})
m.set_content('foo', bar=1, k=2)
self.assertEqual(cm.msg, m)
self.assertEqual(cm.args, ('foo',))
self.assertEqual(cm.kw, dict(bar=1, k=2))
# outcome is whether xxx_method should raise ValueError error when called
# on multipart/subtype. Blank outcome means it depends on xxx (add
# succeeds, make raises). Note: 'none' means there are content-type
# headers but payload is None...this happening in practice would be very
# unusual, so treating it as if there were content seems reasonable.
# method subtype outcome
subtype_params = (
('related', 'no_content', 'succeeds'),
('related', 'none', 'succeeds'),
('related', 'plain', 'succeeds'),
('related', 'related', ''),
('related', 'alternative', 'raises'),
('related', 'mixed', 'raises'),
('alternative', 'no_content', 'succeeds'),
('alternative', 'none', 'succeeds'),
('alternative', 'plain', 'succeeds'),
('alternative', 'related', 'succeeds'),
('alternative', 'alternative', ''),
('alternative', 'mixed', 'raises'),
('mixed', 'no_content', 'succeeds'),
('mixed', 'none', 'succeeds'),
('mixed', 'plain', 'succeeds'),
('mixed', 'related', 'succeeds'),
('mixed', 'alternative', 'succeeds'),
('mixed', 'mixed', ''),
)
def _make_subtype_test_message(self, subtype):
m = self.message()
payload = None
msg_headers = [
('To', 'foo@bar.com'),
('From', 'bar@foo.com'),
]
if subtype != 'no_content':
('content-shadow', 'Logrus'),
msg_headers.append(('X-Random-Header', 'Corwin'))
if subtype == 'text':
payload = ''
msg_headers.append(('Content-Type', 'text/plain'))
m.set_payload('')
elif subtype != 'no_content':
payload = []
msg_headers.append(('Content-Type', 'multipart/' + subtype))
msg_headers.append(('X-Trump', 'Random'))
m.set_payload(payload)
for name, value in msg_headers:
m[name] = value
return m, msg_headers, payload
def _check_disallowed_subtype_raises(self, m, method_name, subtype, method):
with self.assertRaises(ValueError) as ar:
getattr(m, method)()
exc_text = str(ar.exception)
self.assertIn(subtype, exc_text)
self.assertIn(method_name, exc_text)
    def _check_make_multipart(self, m, msg_headers, payload):
        """Verify make_* moved the content headers and payload into a subpart.

        Non-content headers must remain on the outer message; the original
        content-* headers and the payload must have moved to the subpart.
        """
        # The outer message keeps every non-content header...
        count = 0
        for name, value in msg_headers:
            if not name.lower().startswith('content-'):
                self.assertEqual(m[name], value)
                count += 1
        self.assertEqual(len(m), count+1) # +1 for new Content-Type
        # ...and the (single) subpart receives the content-* headers plus
        # the original payload.
        part = next(m.iter_parts())
        count = 0
        for name, value in msg_headers:
            if name.lower().startswith('content-'):
                self.assertEqual(part[name], value)
                count += 1
        self.assertEqual(len(part), count)
        self.assertEqual(part.get_payload(), payload)
    def subtype_as_make(self, method, subtype, outcome):
        """Check make_<method>() against a message of content type *subtype*.

        Driven by subtype_params: a blank or 'raises' outcome means the
        make_ call must be rejected with ValueError; otherwise the message
        must become a multipart/<method> wrapping the previous content.
        """
        m, msg_headers, payload = self._make_subtype_test_message(subtype)
        make_method = 'make_' + method
        if outcome in ('', 'raises'):
            self._check_disallowed_subtype_raises(m, method, subtype, make_method)
            return
        getattr(m, make_method)()
        self.assertEqual(m.get_content_maintype(), 'multipart')
        self.assertEqual(m.get_content_subtype(), method)
        if subtype == 'no_content':
            # Nothing to wrap: the new multipart starts out empty and only
            # gains a Content-Type header.
            self.assertEqual(len(m.get_payload()), 0)
            self.assertEqual(m.items(),
                             msg_headers + [('Content-Type',
                                             'multipart/'+method)])
        else:
            # The previous content must have been moved into one subpart.
            self.assertEqual(len(m.get_payload()), 1)
            self._check_make_multipart(m, msg_headers, payload)
def subtype_as_make_with_boundary(self, method, subtype, outcome):
# Doing all variation is a bit of overkill...
m = self.message()
if outcome in ('', 'raises'):
m['Content-Type'] = 'multipart/' + subtype
with self.assertRaises(ValueError) as cm:
getattr(m, 'make_' + method)()
return
if subtype == 'plain':
m['Content-Type'] = 'text/plain'
elif subtype != 'no_content':
m['Content-Type'] = 'multipart/' + subtype
getattr(m, 'make_' + method)(boundary="abc")
self.assertTrue(m.is_multipart())
self.assertEqual(m.get_boundary(), 'abc')
def test_policy_on_part_made_by_make_comes_from_message(self):
for method in ('make_related', 'make_alternative', 'make_mixed'):
m = self.message(policy=self.policy.clone(content_manager='foo'))
m['Content-Type'] = 'text/plain'
getattr(m, method)()
self.assertEqual(m.get_payload(0).policy.content_manager, 'foo')
    class _TestSetContentManager:
        # Minimal stand-in content manager: always produces a text/plain
        # part whose payload is the given content.
        def set_content(self, msg, content, *args, **kw):
            msg['Content-Type'] = 'text/plain'
            msg.set_payload(content)
    def subtype_as_add(self, method, subtype, outcome):
        """Check add_<method>/add_attachment on a *subtype* message.

        Unlike make_*, add_* may create the enclosing multipart implicitly;
        per subtype_params only the 'raises' outcome is disallowed for add.
        """
        m, msg_headers, payload = self._make_subtype_test_message(subtype)
        cm = self._TestSetContentManager()
        # 'mixed' parts are added via add_attachment rather than add_mixed.
        add_method = 'add_attachment' if method=='mixed' else 'add_' + method
        if outcome == 'raises':
            self._check_disallowed_subtype_raises(m, method, subtype, add_method)
            return
        getattr(m, add_method)('test', content_manager=cm)
        self.assertEqual(m.get_content_maintype(), 'multipart')
        self.assertEqual(m.get_content_subtype(), method)
        if method == subtype or subtype == 'no_content':
            # Already the right multipart (or empty): the new part is simply
            # appended and the outer headers stay untouched.
            self.assertEqual(len(m.get_payload()), 1)
            for name, value in msg_headers:
                self.assertEqual(m[name], value)
            part = m.get_payload()[0]
        else:
            # Existing content was wrapped first, giving two parts.
            self.assertEqual(len(m.get_payload()), 2)
            self._check_make_multipart(m, msg_headers, payload)
            part = m.get_payload()[1]
        self.assertEqual(part.get_content_type(), 'text/plain')
        self.assertEqual(part.get_payload(), 'test')
        # Disposition is only guessed for the unambiguous cases.
        if method=='mixed':
            self.assertEqual(part['Content-Disposition'], 'attachment')
        elif method=='related':
            self.assertEqual(part['Content-Disposition'], 'inline')
        else:
            # Otherwise we don't guess.
            self.assertIsNone(part['Content-Disposition'])
    class _TestSetRaisingContentManager:
        # Content manager whose only purpose is to prove it was invoked:
        # it raises a recognizable exception instead of setting content.
        def set_content(self, msg, content, *args, **kw):
            raise Exception('test')
def test_default_content_manager_for_add_comes_from_policy(self):
cm = self._TestSetRaisingContentManager()
m = self.message(policy=self.policy.clone(content_manager=cm))
for method in ('add_related', 'add_alternative', 'add_attachment'):
with self.assertRaises(Exception) as ar:
getattr(m, method)('')
self.assertEqual(str(ar.exception), 'test')
def message_as_clear(self, body_parts, attachments, parts, msg):
m = self._str_msg(msg)
m.clear()
self.assertEqual(len(m), 0)
self.assertEqual(list(m.items()), [])
self.assertIsNone(m.get_payload())
self.assertEqual(list(m.iter_parts()), [])
def message_as_clear_content(self, body_parts, attachments, parts, msg):
m = self._str_msg(msg)
expected_headers = [h for h in m.keys()
if not h.lower().startswith('content-')]
m.clear_content()
self.assertEqual(list(m.keys()), expected_headers)
self.assertIsNone(m.get_payload())
self.assertEqual(list(m.iter_parts()), [])
def test_is_attachment(self):
m = self._make_message()
self.assertFalse(m.is_attachment())
with self.assertWarns(DeprecationWarning):
self.assertFalse(m.is_attachment)
m['Content-Disposition'] = 'inline'
self.assertFalse(m.is_attachment())
with self.assertWarns(DeprecationWarning):
self.assertFalse(m.is_attachment)
m.replace_header('Content-Disposition', 'attachment')
self.assertTrue(m.is_attachment())
with self.assertWarns(DeprecationWarning):
self.assertTrue(m.is_attachment)
m.replace_header('Content-Disposition', 'AtTachMent')
self.assertTrue(m.is_attachment())
with self.assertWarns(DeprecationWarning):
self.assertTrue(m.is_attachment)
m.set_param('filename', 'abc.png', 'Content-Disposition')
self.assertTrue(m.is_attachment())
with self.assertWarns(DeprecationWarning):
self.assertTrue(m.is_attachment)
class TestEmailMessage(TestEmailMessageBase, TestEmailBase):
    message = EmailMessage

    def test_set_content_adds_MIME_Version(self):
        """A top-level EmailMessage gains MIME-Version on set_content."""
        msg = self._str_msg('')
        manager = self._TestContentManager()
        self.assertNotIn('MIME-Version', msg)
        msg.set_content(content_manager=manager)
        self.assertEqual(msg['MIME-Version'], '1.0')

    class _MIME_Version_adding_CM:
        # A content manager that sets MIME-Version itself, used to check
        # that set_content does not add a second copy.
        def set_content(self, msg, *args, **kw):
            msg['MIME-Version'] = '1.0'

    def test_set_content_does_not_duplicate_MIME_Version(self):
        """If the manager already set MIME-Version, keep exactly one."""
        msg = self._str_msg('')
        manager = self._MIME_Version_adding_CM()
        self.assertNotIn('MIME-Version', msg)
        msg.set_content(content_manager=manager)
        self.assertEqual(msg['MIME-Version'], '1.0')
class TestMIMEPart(TestEmailMessageBase, TestEmailBase):
    # Doing the full test run here may seem a bit redundant, since the two
    # classes are almost identical.  But what if they drift apart?  So we do
    # the full tests so that any future drift doesn't introduce bugs.
    message = MIMEPart

    def test_set_content_does_not_add_MIME_Version(self):
        """Unlike EmailMessage, a sub-part must not carry MIME-Version."""
        part = self._str_msg('')
        manager = self._TestContentManager()
        self.assertNotIn('MIME-Version', part)
        part.set_content(content_manager=manager)
        self.assertNotIn('MIME-Version', part)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| lgpl-3.0 |
firerszd/kbengine | kbe/res/scripts/common/Lib/site-packages/pip/_vendor/requests/packages/urllib3/__init__.py | 650 | 1701 | # urllib3/__init__.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
urllib3 - Thread-safe connection pooling and re-using.
"""
__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
__license__ = 'MIT'
# 'dev' is a placeholder; release tooling substitutes the real version.
__version__ = 'dev'
from .connectionpool import (
HTTPConnectionPool,
HTTPSConnectionPool,
connection_from_url
)
from . import exceptions
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util import make_headers, get_host, Timeout
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
    from logging import NullHandler
except ImportError:
    # Fallback for Python < 2.7, which lacks logging.NullHandler: a
    # handler that silently discards every record.
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass

# Register the no-op handler on this library's root logger so importing
# applications that haven't configured logging don't see warnings.
logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
    """
    Helper for quickly adding a StreamHandler to the logger. Useful for
    debugging.

    Returns the handler after adding it.
    """
    # This method needs to be in this __init__.py to get the __name__ correct
    # even if urllib3 is vendored within another package.
    logger = logging.getLogger(__name__)
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logger.addHandler(handler)
    logger.setLevel(level)
    # Use lazy %-style arguments so the message is only formatted when the
    # DEBUG level is actually enabled for this logger.
    logger.debug('Added a stderr logging handler to logger: %s', __name__)
    return handler
# ... Clean up.
# Remove the NullHandler name from the module namespace now that an
# instance has been registered; it is not part of this module's API.
del NullHandler
| lgpl-3.0 |
kapilrastogi/Impala | shell/impala_shell.py | 2 | 54594 | #!/usr/bin/env python
# Copyright 2012 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Impala's shell
import cmd
import errno
import getpass
import os
import prettytable
import random
import re
import shlex
import signal
import socket
import sqlparse
import subprocess
import sys
import textwrap
import time
from impala_client import (ImpalaClient, DisconnectedException, QueryStateException,
RPCException, TApplicationException)
from impala_shell_config_defaults import impala_shell_defaults
from option_parser import get_option_parser, get_config_from_file
from shell_output import DelimitedOutputFormatter, OutputStream, PrettyOutputFormatter
from shell_output import OverwritingStdErrOutputStream
from subprocess import call
from thrift.Thrift import TException
VERSION_FORMAT = "Impala Shell v%(version)s (%(git_hash)s) built on %(build_date)s"
VERSION_STRING = "build version not available"  # fallback when build info is absent
HISTORY_LENGTH = 100  # number of commands kept in readline history

# Tarball / packaging build makes impala_build_version available
try:
  from impala_build_version import get_git_hash, get_build_date, get_version
  VERSION_STRING = VERSION_FORMAT % {'version': get_version(),
                                     'git_hash': get_git_hash()[:7],
                                     'build_date': get_build_date()}
except Exception:
  # Dev builds don't ship impala_build_version; keep the fallback string.
  pass
class CmdStatus:
  """Values indicate the execution status of a command to the cmd shell driver module
  SUCCESS and ERROR continue running the shell and ABORT exits the shell
  Since SUCCESS == None, successful commands do not need to explicitly return
  anything on completion
  """
  SUCCESS = None  # command completed; keep accepting input
  ABORT = True    # stop the command loop (cmd.Cmd treats truthy as "stop")
  ERROR = False   # command failed; keep accepting input
class ImpalaPrettyTable(prettytable.PrettyTable):
  """Patched version of PrettyTable whose unicode conversion never fails:
  values that cannot be decoded with the configured encoding have their
  offending bytes replaced instead of raising a decode error."""
  def _unicode(self, value):
    if not isinstance(value, basestring):
      value = str(value)
    if not isinstance(value, unicode):
      # If a value cannot be encoded, replace it with a placeholder.
      value = unicode(value, self.encoding, "replace")
    return value
class ImpalaShell(cmd.Cmd):
  """ Simple Impala Shell.

  Basic usage: type connect <host:port> to connect to an impalad
  Then issue queries or other commands. Tab-completion should show the set of
  available commands.
  Methods that implement shell commands return a boolean tuple (stop, status)
  stop is a flag the command loop uses to continue/discontinue the prompt.
  Status tells the caller that the command completed successfully.
  """
  # If not connected to an impalad, the server version is unknown.
  UNKNOWN_SERVER_VERSION = "Not Connected"
  DISCONNECTED_PROMPT = "[Not connected] > "
  # Message to display in shell when cancelling a query
  CANCELLATION_MESSAGE = ' Cancelling Query'
  # Number of times to attempt cancellation before giving up.
  CANCELLATION_TRIES = 3
  # Commands are terminated with the following delimiter.
  CMD_DELIM = ';'
  # Valid variable name pattern
  VALID_VAR_NAME_PATTERN = r'[A-Za-z][A-Za-z0-9_]*'
  # Pattern for removal of comments preceding SET commands
  COMMENTS_BEFORE_SET_PATTERN = r'^(\s*/\*(.|\n)*?\*/|\s*--.*\n)*\s*((un)?set)'
  COMMENTS_BEFORE_SET_REPLACEMENT = r'\3'
  # Variable names are prefixed with the following string
  VAR_PREFIXES = [ 'VAR', 'HIVEVAR' ]
  DEFAULT_DB = 'default'
  # Regex applied to all tokens of a query to detect the query type.
  INSERT_REGEX = re.compile("^insert$", re.I)
  # Seperator for queries in the history file.
  HISTORY_FILE_QUERY_DELIM = '_IMP_DELIM_'
  # Maps a shell-local option name to (value-parser, attribute-name) on self.
  VALID_SHELL_OPTIONS = {
    'LIVE_PROGRESS' : (lambda x: x in ("true", "TRUE", "True", "1"), "print_progress"),
    'LIVE_SUMMARY' : (lambda x: x in ("true", "TRUE", "True", "1"), "print_summary")
  }
  # Minimum time in seconds between two calls to get the exec summary.
  PROGRESS_UPDATE_INTERVAL = 1.0
  def __init__(self, options):
    """Initialize shell state from parsed command-line `options` and, if an
    impalad address was given, connect immediately."""
    cmd.Cmd.__init__(self)
    self.is_alive = True
    self.impalad = None
    # Authentication / transport settings.
    self.use_kerberos = options.use_kerberos
    self.kerberos_service_name = options.kerberos_service_name
    self.use_ssl = options.ssl
    self.ca_cert = options.ca_cert
    self.user = options.user
    self.ldap_password = options.ldap_password
    self.use_ldap = options.use_ldap
    self.verbose = options.verbose
    self.prompt = ImpalaShell.DISCONNECTED_PROMPT
    self.server_version = ImpalaShell.UNKNOWN_SERVER_VERSION
    self.refresh_after_connect = options.refresh_after_connect
    self.current_db = options.default_db
    self.history_file = os.path.expanduser("~/.impalahistory")
    # Stores the state of user input until a delimiter is seen.
    self.partial_cmd = str()
    # Stores the old prompt while the user input is incomplete.
    self.cached_prompt = str()
    self.show_profiles = options.show_profiles
    # Output formatting flags/options
    self.output_file = options.output_file
    self.output_delimiter = options.output_delimiter
    self.write_delimited = options.write_delimited
    self.print_header = options.print_header
    self.progress_stream = OverwritingStdErrOutputStream()
    self.set_query_options = {}
    self.set_variables = options.variables
    self._populate_command_list()
    self.imp_client = None;
    # Tracks query handle of the last query executed. Used by the 'profile' command.
    self.last_query_handle = None;
    self.query_handle_closed = None
    self.print_summary = options.print_summary
    self.print_progress = options.print_progress
    # Due to a readline bug in centos/rhel7, importing it causes control characters to be
    # printed. This breaks any scripting against the shell in non-interactive mode. Since
    # the non-interactive mode does not need readline - do not import it.
    if options.query or options.query_file:
      self.interactive = False
      self._disable_readline()
    else:
      self.interactive = True
      try:
        self.readline = __import__('readline')
        self.readline.set_history_length(HISTORY_LENGTH)
      except ImportError:
        self._disable_readline()
    if options.impalad is not None:
      self.do_connect(options.impalad)
    # We handle Ctrl-C ourselves, using an Event object to signal cancellation
    # requests between the handler and the main shell thread.
    signal.signal(signal.SIGINT, self._signal_handler)
def _populate_command_list(self):
"""Populate a list of commands in the shell.
Each command has its own method of the form do_<command>, and can be extracted by
introspecting the class directory.
"""
# Slice the command method name to get the name of the command.
self.commands = [cmd[3:] for cmd in dir(self.__class__) if cmd.startswith('do_')]
def _disable_readline(self):
"""Disables the readline module.
The readline module is responsible for keeping track of command history.
"""
self.readline = None
def _print_options(self, default_options, set_options):
# Prints the current query options
# with default values distinguished from set values by brackets [], followed by
# shell-local options.
if not default_options and not set_options:
print '\tNo options available.'
else:
for k in sorted(default_options):
if k in set_options and set_options[k] != default_options[k]:
print '\n'.join(["\t%s: %s" % (k, set_options[k])])
else:
print '\n'.join(["\t%s: [%s]" % (k, default_options[k])])
self._print_shell_options()
def _print_variables(self):
# Prints the currently defined variables.
if not self.set_variables:
print '\tNo variables defined.'
else:
for k in sorted(self.set_variables):
print '\n'.join(["\t%s: %s" % (k, self.set_variables[k])])
def _print_shell_options(self):
"""Prints shell options, which are local and independent of query options."""
print "\nShell Options"
for x in self.VALID_SHELL_OPTIONS:
print "\t%s: %s" % (x, self.__dict__[self.VALID_SHELL_OPTIONS[x][1]])
def do_shell(self, args):
"""Run a command on the shell
Usage: shell <cmd>
! <cmd>
"""
try:
start_time = time.time()
os.system(args)
self._print_if_verbose("--------\nExecuted in %2.2fs" % (time.time() - start_time))
except Exception, e:
print_to_stderr('Error running command : %s' % e)
return CmdStatus.ERROR
def _remove_comments_before_set(self, line):
"""SET commands preceded by a comment become a SET query, which are not processed
locally. SET VAR:* commands must be processed locally, since they are not known
to the frontend. Thus, we remove comments that precede SET commands to enforce the
local processing."""
regexp = re.compile(ImpalaShell.COMMENTS_BEFORE_SET_PATTERN, re.IGNORECASE)
return regexp.sub(ImpalaShell.COMMENTS_BEFORE_SET_REPLACEMENT, line, 1)
  def sanitise_input(self, args):
    """Convert the command to lower case, so it's recognized"""
    # A command terminated by a semi-colon is legal. Check for the trailing
    # semi-colons and strip them from the end of the command.
    if not self.interactive:
      # Strip all the non-interactive commands of the delimiter.
      args = self._remove_comments_before_set(args)
      tokens = args.strip().split(' ')
      # Only the first token (the command word) is lower-cased; the rest of
      # the text must keep its case (quoted literals, identifiers, ...).
      tokens[0] = tokens[0].lower()
      return ' '.join(tokens).rstrip(ImpalaShell.CMD_DELIM)
    # Handle EOF if input is interactive
    tokens = args.strip().split(' ')
    tokens[0] = tokens[0].lower()
    if tokens[0] == 'eof':
      if not self.partial_cmd:
        # The first token is the command.
        # If it's EOF, call do_quit()
        return 'quit'
      else:
        # If a command is in progress and the user hits a Ctrl-D, clear its state
        # and reset the prompt.
        self.prompt = self.cached_prompt
        self.partial_cmd = str()
        # The print statement makes the new prompt appear in a new line.
        # Also print an extra newline to indicate that the current command has
        # been cancelled.
        print '\n'
        return str()
    # The first token is converted into lower case to route it to the
    # appropriate command handler. This only applies to the first line of user input.
    # Modifying tokens in subsequent lines may change the semantics of the command,
    # so do not modify the text.
    args = self._check_for_command_completion(args)
    args = self._remove_comments_before_set(args)
    tokens = args.strip().split(' ')
    tokens[0] = tokens[0].lower()
    args = ' '.join(tokens).strip()
    return args.rstrip(ImpalaShell.CMD_DELIM)
def _shlex_split(self, line):
"""Reimplement shlex.split() so that escaped single quotes
are actually escaped. shlex.split() only escapes double quotes
by default. This method will throw a ValueError if an open
quotation (either single or double) is found.
"""
my_split = shlex.shlex(line, posix=True)
my_split.escapedquotes = '"\''
my_split.whitespace_split = True
my_split.commenters = ''
return list(my_split)
  def _cmd_ends_with_delim(self, line):
    """Check if the input command ends with a command delimiter.

    A command ending with the delimiter and containing an open quotation character is
    not considered terminated. If no open quotation is found, it's considered
    terminated.
    """
    if line.endswith(ImpalaShell.CMD_DELIM):
      try:
        # Look for an open quotation in the entire command, and not just the
        # current line.
        if self.partial_cmd: line = '%s %s' % (self.partial_cmd, line)
        self._shlex_split(line)
        return True
      # If the command ends with a delimiter, check if it has an open quotation.
      # shlex in self._split() throws a ValueError iff an open quotation is found.
      # A quotation can either be a single quote or a double quote.
      except ValueError:
        pass

      # This checks to see if there are any backslashed quotes
      # outside of quotes, since backslashed quotes
      # outside of single or double quotes should not be escaped.
      # Ex. 'abc\'xyz' -> closed because \' is escaped
      #     \'abcxyz   -> open because \' is not escaped
      #     \'abcxyz'  -> closed
      # Iterate through the line and switch the state if a single or double quote is found
      # and ignore escaped single and double quotes if the line is considered open (meaning
      # a previous single or double quote has not been closed yet)
      state_closed = True;
      opener = None;
      for i, char in enumerate(line):
        if state_closed and (char in ['\'', '\"']):
          # Entering a quoted region; remember which quote opened it.
          state_closed = False
          opener = char
        elif not state_closed and opener == char:
          # Only an unescaped matching quote closes the region.
          if line[i - 1] != '\\':
            state_closed = True
            opener = None;
      return state_closed
    return False
  def _check_for_command_completion(self, cmd):
    """Check for a delimiter at the end of user input.

    The end of the user input is scanned for a legal delimiter.
    If a delimiter is not found:
      - Input is not send to onecmd()
        - onecmd() is a method in Cmd which routes the user input to the
          appropriate method. An empty string results in a no-op.
      - Input is removed from history.
      - Input is appended to partial_cmd
    If a delimiter is found:
      - The contents of partial_cmd are put in history, as they represent
        a completed command.
      - The contents are passed to the appropriate method for execution.
      - partial_cmd is reset to an empty string.

    Note: the `cmd` parameter shadows the imported `cmd` module inside this
    method; the module is not needed here.
    """
    if self.readline: current_history_len = self.readline.get_current_history_length()
    # Input is incomplete, store the contents and do nothing.
    if not self._cmd_ends_with_delim(cmd):
      # The user input is incomplete, change the prompt to reflect this.
      if not self.partial_cmd and cmd:
        self.cached_prompt = self.prompt
        self.prompt = '> '.rjust(len(self.cached_prompt))
      # partial_cmd is already populated, add the current input after a newline.
      if self.partial_cmd and cmd:
        self.partial_cmd = "%s\n%s" % (self.partial_cmd, cmd)
      else:
        # If the input string is empty or partial_cmd is empty.
        self.partial_cmd = "%s%s" % (self.partial_cmd, cmd)
      # Remove the most recent item from history if:
      #   -- The current state of user input in incomplete.
      #   -- The most recent user input is not an empty string
      if self.readline and current_history_len > 0 and cmd:
        self.readline.remove_history_item(current_history_len - 1)
      # An empty string results in a no-op. Look at emptyline()
      return str()
    elif self.partial_cmd:  # input ends with a delimiter and partial_cmd is not empty
      if cmd != ImpalaShell.CMD_DELIM:
        completed_cmd = "%s\n%s" % (self.partial_cmd, cmd)
      else:
        completed_cmd = "%s%s" % (self.partial_cmd, cmd)
      # Reset partial_cmd to an empty string
      self.partial_cmd = str()
      # Replace the most recent history item with the completed command.
      completed_cmd = sqlparse.format(completed_cmd)
      if self.readline and current_history_len > 0:
        self.readline.replace_history_item(current_history_len - 1,
                                           completed_cmd.encode('utf-8'))
      # Revert the prompt to its earlier state
      self.prompt = self.cached_prompt
    else:  # Input has a delimiter and partial_cmd is empty
      completed_cmd = sqlparse.format(cmd)
    return completed_cmd
  def _signal_handler(self, signal, frame):
    """Handles query cancellation on a Ctrl+C event.

    Note: the `signal` parameter shadows the imported `signal` module here;
    the module is not needed inside this handler.
    """
    if self.last_query_handle is None or self.query_handle_closed:
      return
    # Create a new connection to the impalad and cancel the query.
    # A fresh connection is used because the main one may be blocked
    # mid-RPC by the very query being cancelled.
    for cancel_try in xrange(ImpalaShell.CANCELLATION_TRIES):
      try:
        self.query_handle_closed = True
        print_to_stderr(ImpalaShell.CANCELLATION_MESSAGE)
        new_imp_client = ImpalaClient(self.impalad)
        new_imp_client.connect()
        new_imp_client.cancel_query(self.last_query_handle, False)
        self.imp_client.close_query(self.last_query_handle)
        self._validate_database()
        break
      except Exception, e:
        # Suppress harmless errors.
        err_msg = str(e).strip()
        if err_msg in ['ERROR: Cancelled', 'ERROR: Invalid or unknown query handle']:
          break
        print_to_stderr("Failed to reconnect and close (try %i/%i): %s" % (
            cancel_try + 1, ImpalaShell.CANCELLATION_TRIES, err_msg))
  def _replace_variables(self, query):
    """Replaces variable within the query text with their corresponding values.

    Returns the substituted query, or None if any referenced variable is
    unknown or uses bad syntax (errors are reported to stderr).
    """
    errors = False
    # Find every unescaped ${...} reference; matching is case-insensitive.
    matches = set(map(lambda v: v.upper(), re.findall(r'(?<!\\)\${([^}]+)}', query)))
    for name in matches:
      value = None
      # Check if syntax is correct
      var_name = self._get_var_name(name)
      if var_name is None:
        print_to_stderr('Error: Unknown substitution syntax (%s). ' % (name,) + \
                        'Use ${VAR:var_name}.')
        errors = True
      else:
        # Replaces variable value
        if self.set_variables and var_name in self.set_variables:
          value = self.set_variables[var_name]
          regexp = re.compile(r'(?<!\\)\${%s}' % (name,), re.IGNORECASE)
          query = regexp.sub(value, query)
        else:
          print_to_stderr('Error: Unknown variable %s' % (var_name))
          errors = True
    if errors:
      return None
    else:
      return query
  def precmd(self, args):
    """Sanitise input, split multi-statement input onto the cmdqueue, and
    verify the connection is still alive before the command executes."""
    args = self.sanitise_input(args)
    if not args: return args
    # Split args using sqlparse. If there are multiple queries present in user input,
    # the length of the returned query list will be greater than one.
    parsed_cmds = sqlparse.split(args)
    if len(parsed_cmds) > 1:
      # The last command needs a delimiter to be successfully executed.
      parsed_cmds[-1] += ImpalaShell.CMD_DELIM
      self.cmdqueue.extend(parsed_cmds)
      # If cmdqueue is populated, then commands are executed from the cmdqueue, and user
      # input is ignored. Send an empty string as the user input just to be safe.
      return str()
    try:
      self.imp_client.test_connection()
    except TException:
      # The connection died since the last command; reconnect transparently.
      print_to_stderr("Connection lost, reconnecting...")
      self._connect()
    return args.encode('utf-8')
def onecmd(self, line):
"""Overridden to ensure the variable replacement is processed in interactive
as well as non-interactive mode, since the precmd method would only work for
the interactive case, when cmdloop is called.
"""
# Replace variables in the statement before it's executed
line = self._replace_variables(line)
# Cmd is an old-style class, hence we need to call the method directly
# instead of using super()
# TODO: This may have to be changed to a super() call once we move to Python 3
if line == None:
return CmdStatus.ERROR
else:
return cmd.Cmd.onecmd(self, line)
def postcmd(self, status, args):
# status conveys to shell how the shell should continue execution
# should always be a CmdStatus
return status
  def do_summary(self, args):
    """Print the execution summary of the most recent query."""
    summary = None
    try:
      summary = self.imp_client.get_summary(self.last_query_handle)
    except RPCException:
      # Treated the same as "no summary": reported below.
      pass
    if summary is None:
      print_to_stderr("Could not retrieve summary for query.")
      return CmdStatus.ERROR
    if summary.nodes is None:
      print_to_stderr("Summary not available")
      return CmdStatus.SUCCESS
    output = []
    table = self._default_summary_table()
    self.imp_client.build_summary_table(summary, 0, False, 0, False, output)
    formatter = PrettyOutputFormatter(table)
    self.output_stream = OutputStream(formatter, filename=self.output_file)
    self.output_stream.write(output)
def _handle_shell_options(self, token, value):
try:
handle = self.VALID_SHELL_OPTIONS[token]
self.__dict__[handle[1]] = handle[0](value)
return True
except KeyError:
return False
def _get_var_name(self, name):
"""Look for a namespace:var_name pattern in an option name.
Return the variable name if it's a match or None otherwise.
"""
ns_match = re.match(r'^([^:]*):(.*)', name)
if ns_match is not None:
ns = ns_match.group(1)
var_name = ns_match.group(2)
if ns in ImpalaShell.VAR_PREFIXES:
return var_name
return None
  def do_set(self, args):
    """Set or display query options.

    Display query options:
    Usage: SET
    Set query options:
    Usage: SET <option>=<value>
           OR
           SET VAR:<variable>=<value>
    """
    # TODO: Expand set to allow for setting more than just query options.
    if len(args) == 0:
      # Bare SET: just display everything.
      print "Query options (defaults shown in []):"
      self._print_options(self.imp_client.default_query_options, self.set_query_options)
      print "\nVariables:"
      self._print_variables()
      return CmdStatus.SUCCESS

    # Remove any extra spaces surrounding the tokens.
    # Allows queries that have spaces around the = sign.
    tokens = [arg.strip() for arg in args.split("=")]
    if len(tokens) != 2:
      print_to_stderr("Error: SET <option>=<value>")
      print_to_stderr("       OR")
      print_to_stderr("       SET VAR:<variable>=<value>")
      return CmdStatus.ERROR
    option_upper = tokens[0].upper()
    # Check if it's a variable
    var_name = self._get_var_name(option_upper)
    if var_name is not None:
      # Set the variable
      self.set_variables[var_name] = tokens[1]
      self._print_if_verbose('Variable %s set to %s' % (var_name, tokens[1]))
    elif not self._handle_shell_options(option_upper, tokens[1]):
      # Neither a variable nor a shell-local option: treat as a query option.
      if option_upper not in self.imp_client.default_query_options.keys():
        print "Unknown query option: %s" % (tokens[0])
        print "Available query options, with their values (defaults shown in []):"
        self._print_options(self.imp_client.default_query_options, self.set_query_options)
        return CmdStatus.ERROR
      self.set_query_options[option_upper] = tokens[1]
      self._print_if_verbose('%s set to %s' % (option_upper, tokens[1]))
  def do_unset(self, args):
    """Unset a query option"""
    if len(args.split()) != 1:
      print 'Usage: unset <option>'
      return CmdStatus.ERROR
    option = args.upper()
    # Check if it's a variable
    var_name = self._get_var_name(option)
    if var_name is not None:
      # A VAR:/HIVEVAR: prefix means a shell variable rather than an option.
      if self.set_variables.get(var_name):
        print 'Unsetting variable %s' % var_name
        del self.set_variables[var_name]
      else:
        print "No variable called %s is set" % var_name
    elif self.set_query_options.get(option):
      print 'Unsetting option %s' % option
      del self.set_query_options[option]
    else:
      print "No option called %s is set" % option
  def do_quit(self, args):
    """Quit the Impala shell"""
    self._print_if_verbose("Goodbye " + self.user)
    self.is_alive = False
    # ABORT (truthy) makes the cmd loop stop (see postcmd).
    return CmdStatus.ABORT
  def do_exit(self, args):
    """Exit the impala shell"""
    # Simple alias for quit.
    return self.do_quit(args)
  def do_connect(self, args):
    """Connect to an Impalad instance:
    Usage: connect, defaults to the fqdn of the localhost and port 21000
           connect <hostname:port>
           connect <hostname>, defaults to port 21000
    """
    # Assume the user wants to connect to the local impalad if no connection string is
    # specified. Connecting to a kerberized impalad requires an fqdn as the host name.
    if self.use_ldap and self.ldap_password is None:
      self.ldap_password = getpass.getpass("LDAP password for %s: " % self.user)

    if not args: args = socket.getfqdn()
    tokens = args.split(" ")
    # validate the connection string.
    host_port = [val for val in tokens[0].split(':') if val.strip()]
    if (':' in tokens[0] and len(host_port) != 2):
      print_to_stderr("Connection string must either be empty, or of the form "
                      "<hostname[:port]>")
      return CmdStatus.ERROR
    elif len(host_port) == 1:
      # No port given: default to 21000.
      host_port.append('21000')
    self.impalad = tuple(host_port)
    # Drop any previous connection before building a new client.
    if self.imp_client: self.imp_client.close_connection()
    self.imp_client = ImpalaClient(self.impalad, self.use_kerberos,
                                   self.kerberos_service_name, self.use_ssl,
                                   self.ca_cert, self.user, self.ldap_password,
                                   self.use_ldap)
    self._connect()
    # If the connection fails and the Kerberos has not been enabled,
    # check for a valid kerberos ticket and retry the connection
    # with kerberos enabled.
    if not self.imp_client.connected and not self.use_kerberos:
      try:
        if call(["klist", "-s"]) == 0:
          print_to_stderr(("Kerberos ticket found in the credentials cache, retrying "
                           "the connection with a secure transport."))
          self.imp_client.use_kerberos = True
          self.imp_client.use_ldap = False
          self.imp_client.ldap_password = None
          self._connect()
      except OSError, e:
        # klist binary not available; nothing further to try.
        pass

    if self.imp_client.connected:
      self._print_if_verbose('Connected to %s:%s' % self.impalad)
      self._print_if_verbose('Server version: %s' % self.server_version)
      self.prompt = "[%s:%s] > " % self.impalad
      if self.refresh_after_connect:
        self.cmdqueue.append('invalidate metadata' + ImpalaShell.CMD_DELIM)
        print_to_stderr("Invalidating Metadata")
      self._validate_database()
    try:
      self.imp_client.build_default_query_options_dict()
    except RPCException, e:
      print_to_stderr(e)
    # In the case that we lost connection while a command was being entered,
    # we may have a dangling command, clear partial_cmd
    self.partial_cmd = str()
    # Check if any of query options set by the user are inconsistent
    # with the impalad being connected to
    for set_option in self.set_query_options:
      if set_option not in set(self.imp_client.default_query_options):
        print ('%s is not supported for the impalad being '
               'connected to, ignoring.' % set_option)
        del self.set_query_options[set_option]
  def _connect(self):
    """Attempt to connect self.imp_client and record the server version.

    On failure the shell is left alive but disconnected (prompt reset,
    message on stderr); only TApplicationException propagates to the caller.
    """
    try:
      server_version = self.imp_client.connect()
      if server_version:
        self.server_version = server_version
    except TApplicationException:
      # We get a TApplicationException if the transport is valid,
      # but the RPC does not exist.
      print_to_stderr("Error: Unable to communicate with impalad service. This "
                      "service may not be an impalad instance. Check host:port and try again.")
      self.imp_client.close_connection()
      raise
    except ImportError:
      # SSL support needs the stdlib 'ssl' module; without it we cannot
      # proceed at all, so exit rather than retry.
      print_to_stderr("Unable to import the python 'ssl' module. It is"
                      " required for an SSL-secured connection.")
      sys.exit(1)
    except socket.error, (code, e):
      # if the socket was interrupted, reconnect the connection with the client
      if code == errno.EINTR:
        self._reconnect_cancellation()
      else:
        print_to_stderr("Socket error %s: %s" % (code, e))
        self.prompt = self.DISCONNECTED_PROMPT
    except Exception, e:
      # Any other failure (bad host, auth error, ...) leaves the shell
      # disconnected but running.
      print_to_stderr("Error connecting: %s, %s" % (type(e).__name__, e))
      # If a connection to another impalad failed while already connected
      # reset the prompt to disconnected.
      self.server_version = self.UNKNOWN_SERVER_VERSION
      self.prompt = self.DISCONNECTED_PROMPT
  def _reconnect_cancellation(self):
    """Re-establish the connection after a query was cancelled (EINTR),
    then re-issue the USE for the current database."""
    self._connect()
    self._validate_database()
  def _validate_database(self):
    """If a database was selected, queue a USE command so the new session
    switches back to it. Backticks are stripped before re-quoting."""
    if self.current_db:
      self.current_db = self.current_db.strip('`')
      self.cmdqueue.append(('use `%s`' % self.current_db) + ImpalaShell.CMD_DELIM)
  def _print_if_verbose(self, message):
    """Print message to stderr only when verbose output is enabled."""
    if self.verbose:
      print_to_stderr(message)
  def print_runtime_profile(self, profile, status=False):
    """Print a query's runtime profile to stdout.

    Printed when profiles are globally enabled (show_profiles) or when
    status=True forces it (e.g. the explicit PROFILE command).
    """
    if self.show_profiles or status:
      if profile is not None:
        print "Query Runtime Profile:\n" + profile
def _parse_table_name_arg(self, arg):
""" Parses an argument string and returns the result as a db name, table name combo.
If the table name was not fully qualified, the current database is returned as the db.
Otherwise, the table is split into db/table name parts and returned.
If an invalid format is provided, None is returned.
"""
if not arg: return
# If a multi-line argument, the name might be split across lines
arg = arg.replace('\n', '')
# Get the database and table name, using the current database if the table name
# wasn't fully qualified.
db_name, tbl_name = self.current_db, arg
if db_name is None:
db_name = ImpalaShell.DEFAULT_DB
db_table_name = arg.split('.')
if len(db_table_name) == 1:
return db_name, db_table_name[0]
if len(db_table_name) == 2:
return db_table_name
def do_alter(self, args):
query = self.imp_client.create_beeswax_query("alter %s" % args,
self.set_query_options)
return self._execute_stmt(query)
def do_create(self, args):
query = self.imp_client.create_beeswax_query("create %s" % args,
self.set_query_options)
return self._execute_stmt(query)
def do_drop(self, args):
query = self.imp_client.create_beeswax_query("drop %s" % args,
self.set_query_options)
return self._execute_stmt(query)
def do_load(self, args):
query = self.imp_client.create_beeswax_query("load %s" % args,
self.set_query_options)
return self._execute_stmt(query)
  def do_profile(self, args):
    """Prints the runtime profile of the last INSERT or SELECT query executed."""
    if len(args) > 0:
      print_to_stderr("'profile' does not accept any arguments")
      return CmdStatus.ERROR
    elif self.last_query_handle is None:
      print_to_stderr('No previous query available to profile')
      return CmdStatus.ERROR
    profile = self.imp_client.get_runtime_profile(self.last_query_handle)
    # status=True forces printing even when show_profiles is off.
    return self.print_runtime_profile(profile, True)
def do_select(self, args):
"""Executes a SELECT... query, fetching all rows"""
query = self.imp_client.create_beeswax_query("select %s" % args,
self.set_query_options)
return self._execute_stmt(query)
  def do_compute(self, args):
    """Executes a COMPUTE STATS query.
    Impala shell cannot get child query handle so it cannot
    query live progress for COMPUTE STATS query. Disable live
    progress/summary callback for COMPUTE STATS query."""
    query = self.imp_client.create_beeswax_query("compute %s" % args,
                                                 self.set_query_options)
    # Save the live-reporting flags so they can be restored afterwards.
    (prev_print_progress, prev_print_summary) = self.print_progress, self.print_summary
    (self.print_progress, self.print_summary) = False, False;
    try:
      ret = self._execute_stmt(query)
    finally:
      # Always restore, even if the statement raised.
      (self.print_progress, self.print_summary) = prev_print_progress, prev_print_summary
    return ret
  def _format_outputstream(self):
    """(Re)build self.output_stream for the columns of the last query.

    Uses a delimiter-based formatter when delimited output is requested,
    otherwise a prettytable-based formatter.
    """
    column_names = self.imp_client.get_column_names(self.last_query_handle)
    if self.write_delimited:
      formatter = DelimitedOutputFormatter(field_delim=self.output_delimiter)
      self.output_stream = OutputStream(formatter, filename=self.output_file)
      # print the column names
      if self.print_header:
        self.output_stream.write([column_names])
    else:
      prettytable = self.construct_table_with_header(column_names)
      formatter = PrettyOutputFormatter(prettytable)
      self.output_stream = OutputStream(formatter, filename=self.output_file)
  def _periodic_wait_callback(self):
    """If enough time elapsed since the last call to the periodic callback,
    execute the RPC to get the query exec summary and depending on the set options
    print either the progress or the summary or both to stderr.
    """
    if not self.print_progress and not self.print_summary: return
    checkpoint = time.time()
    # Throttle the summary RPC to at most once per PROGRESS_UPDATE_INTERVAL.
    if checkpoint - self.last_summary > self.PROGRESS_UPDATE_INTERVAL:
      summary = self.imp_client.get_summary(self.last_query_handle)
      if summary and summary.progress:
        progress = summary.progress
        # If the data is not complete return and wait for a good result.
        if not progress.total_scan_ranges and not progress.num_completed_scan_ranges:
          self.last_summary = time.time()
          return
        data = ""
        if self.print_progress and progress.total_scan_ranges > 0:
          # Render a 100-character '#' progress bar plus a percentage.
          val = ((summary.progress.num_completed_scan_ranges * 100) /
                 summary.progress.total_scan_ranges)
          fragment_text = "[%s%s] %s%%\n" % ("#" * val, " " * (100 - val), val)
          data += fragment_text
        if self.print_summary:
          table = self._default_summary_table()
          output = []
          self.imp_client.build_summary_table(summary, 0, False, 0, False, output)
          formatter = PrettyOutputFormatter(table)
          data += formatter.format(output) + "\n"
        # One write so the progress stream can repaint atomically.
        self.progress_stream.write(data)
      self.last_summary = time.time()
def _default_summary_table(self):
return self.construct_table_with_header(["Operator", "#Hosts", "Avg Time", "Max Time",
"#Rows", "Est. #Rows", "Peak Mem",
"Est. Peak Mem", "Detail"])
  def _execute_stmt(self, query, is_insert=False):
    """ The logic of executing any query statement
    The client executes the query and the query_handle is returned immediately,
    even as the client waits for the query to finish executing.
    If the query was not an insert, the results are fetched from the client
    as they are streamed in, through the use of a generator.
    The execution time is printed and the query is closed if it hasn't been already
    """
    try:
      self._print_if_verbose("Query: %s" % (query.query,))
      start_time = time.time()
      self.last_query_handle = self.imp_client.execute_query(query)
      self.query_handle_closed = False
      self.last_summary = time.time()
      # Blocks until the query finishes; the callback prints live progress.
      # NOTE(review): the return value is never used -- confirm it can be dropped.
      wait_to_finish = self.imp_client.wait_to_finish(self.last_query_handle,
          self._periodic_wait_callback)
      # Reset the progress stream.
      self.progress_stream.clear()
      if is_insert:
        # retrieve the error log
        warning_log = self.imp_client.get_warning_log(self.last_query_handle)
        num_rows = self.imp_client.close_insert(self.last_query_handle)
      else:
        # impalad does not support the fetching of metadata for certain types of queries.
        if not self.imp_client.expect_result_metadata(query.query):
          # Close the query
          self.imp_client.close_query(self.last_query_handle)
          self.query_handle_closed = True
          return CmdStatus.SUCCESS
        self._format_outputstream()
        # fetch returns a generator
        rows_fetched = self.imp_client.fetch(self.last_query_handle)
        num_rows = 0
        for rows in rows_fetched:
          self.output_stream.write(rows)
          num_rows += len(rows)
        # retrieve the error log
        warning_log = self.imp_client.get_warning_log(self.last_query_handle)
      end_time = time.time()
      if warning_log:
        self._print_if_verbose(warning_log)
      # print insert when is_insert is true (which is 1)
      # print fetch when is_insert is false (which is 0)
      verb = ["Fetch", "Insert"][is_insert]
      self._print_if_verbose("%sed %d row(s) in %2.2fs" % (verb, num_rows,
          end_time - start_time))
      if not is_insert:
        self.imp_client.close_query(self.last_query_handle, self.query_handle_closed)
      self.query_handle_closed = True
      profile = self.imp_client.get_runtime_profile(self.last_query_handle)
      self.print_runtime_profile(profile)
      return CmdStatus.SUCCESS
    except RPCException, e:
      # could not complete the rpc successfully
      print_to_stderr(e)
    except QueryStateException, e:
      # an exception occurred while executing the query
      self.imp_client.close_query(self.last_query_handle, self.query_handle_closed)
      print_to_stderr(e)
    except DisconnectedException, e:
      # the client has lost the connection
      print_to_stderr(e)
      self.imp_client.connected = False
      self.prompt = ImpalaShell.DISCONNECTED_PROMPT
    except socket.error, (code, e):
      # if the socket was interrupted, reconnect the connection with the client
      if code == errno.EINTR:
        print ImpalaShell.CANCELLATION_MESSAGE
        self._reconnect_cancellation()
      else:
        print_to_stderr("Socket error %s: %s" % (code, e))
        self.prompt = self.DISCONNECTED_PROMPT
        self.imp_client.connected = False
    except Exception, u:
      # if the exception is unknown, there was possibly an issue with the connection
      # set the shell as disconnected
      print_to_stderr('Unknown Exception : %s' % (u,))
      self.imp_client.connected = False
      self.prompt = ImpalaShell.DISCONNECTED_PROMPT
    # All handled exceptions fall through to an ERROR status.
    return CmdStatus.ERROR
  def construct_table_with_header(self, column_names):
    """ Constructs the table header for a given query handle.
    Should be called after the query has finished and before data is fetched.
    All data is left aligned.
    """
    table = ImpalaPrettyTable()
    for column in column_names:
      # Column names may be encoded as utf-8
      table.add_column(column.decode('utf-8', 'ignore'), [])
    table.align = "l"
    return table
def do_values(self, args):
"""Executes a VALUES(...) query, fetching all rows"""
query = self.imp_client.create_beeswax_query("values %s" % args,
self.set_query_options)
return self._execute_stmt(query)
  def do_with(self, args):
    """Executes a query with a WITH clause, fetching all rows"""
    query = self.imp_client.create_beeswax_query("with %s" % args,
                                                 self.set_query_options)
    # Set posix=True and add "'" to escaped quotes
    # to deal with escaped quotes in string literals
    lexer = shlex.shlex(query.query.lstrip(), posix=True)
    lexer.escapedquotes += "'"
    # Because the WITH clause may precede INSERT or SELECT queries,
    # just checking the first token is insufficient.
    is_insert = False
    tokens = list(lexer)
    # Treat the statement as an INSERT if any token matches the INSERT regex.
    if filter(self.INSERT_REGEX.match, tokens): is_insert = True
    return self._execute_stmt(query, is_insert=is_insert)
  def do_use(self, args):
    """Executes a USE... query"""
    query = self.imp_client.create_beeswax_query("use %s" % args,
                                                 self.set_query_options)
    if self._execute_stmt(query) is CmdStatus.SUCCESS:
      # Remember the selected database so reconnects can restore it.
      self.current_db = args
      # NOTE(review): the success path falls through and returns None instead
      # of CmdStatus.SUCCESS -- confirm callers treat None as success.
    else:
      return CmdStatus.ERROR
def do_show(self, args):
"""Executes a SHOW... query, fetching all rows"""
query = self.imp_client.create_beeswax_query("show %s" % args,
self.set_query_options)
return self._execute_stmt(query)
def do_describe(self, args):
"""Executes a DESCRIBE... query, fetching all rows"""
query = self.imp_client.create_beeswax_query("describe %s" % args,
self.set_query_options)
return self._execute_stmt(query)
  def do_desc(self, args):
    """Alias for the DESCRIBE command."""
    return self.do_describe(args)
def do_insert(self, args):
"""Executes an INSERT query"""
query = self.imp_client.create_beeswax_query("insert %s" % args,
self.set_query_options)
return self._execute_stmt(query, is_insert=True)
def do_explain(self, args):
"""Explain the query execution plan"""
query = self.imp_client.create_beeswax_query("explain %s" % args,
self.set_query_options)
return self._execute_stmt(query)
  def do_history(self, args):
    """Display command history"""
    # Deal with readline peculiarity. When history does not exists,
    # readline returns 1 as the history length and stores 'None' at index 0.
    if self.readline and self.readline.get_current_history_length() > 0:
      # readline history is 1-indexed.
      for index in xrange(1, self.readline.get_current_history_length() + 1):
        cmd = self.readline.get_history_item(index)
        print_to_stderr('[%d]: %s' % (index, cmd))
    else:
      print_to_stderr("The readline module was either not found or disabled. Command "
                      "history will not be collected.")
  def do_tip(self, args):
    """Print a random tip"""
    # Picks uniformly from the module-level TIPS list.
    print_to_stderr(random.choice(TIPS))
  def preloop(self):
    """Load the history file if it exists"""
    if self.readline:
      # The history file is created when the Impala shell is invoked and commands are
      # issued. In the first invocation of the shell, the history file will not exist.
      # Clearly, this is not an error, return.
      if not os.path.exists(self.history_file): return
      try:
        self.readline.read_history_file(self.history_file)
        # On-disk entries use HISTORY_FILE_QUERY_DELIM in place of newlines;
        # restore the newlines for in-memory history.
        self._replace_history_delimiters(ImpalaShell.HISTORY_FILE_QUERY_DELIM, '\n')
      except IOError, i:
        msg = "Unable to load command history (disabling history collection): %s" % i
        print_to_stderr(msg)
        # This history file exists but is not readable, disable readline.
        self._disable_readline()
  def postloop(self):
    """Save session commands in history."""
    if self.readline:
      try:
        # Inverse of preloop(): newlines are swapped back to the on-disk
        # delimiter before writing.
        self._replace_history_delimiters('\n', ImpalaShell.HISTORY_FILE_QUERY_DELIM)
        self.readline.write_history_file(self.history_file)
      except IOError, i:
        msg = "Unable to save command history (disabling history collection): %s" % i
        print_to_stderr(msg)
        # The history file is not writable, disable readline.
        self._disable_readline()
  def _replace_history_delimiters(self, src_delim, tgt_delim):
    """Replaces source_delim with target_delim for all items in history.

    Read all the items from history into a local list. Clear the history and copy them
    back after doing the transformation.
    """
    history_len = self.readline.get_current_history_length()
    # load the history and replace the shell's delimiter with EOL
    history_items = map(self.readline.get_history_item, xrange(1, history_len + 1))
    history_items = [item.replace(src_delim, tgt_delim) for item in history_items]
    # Clear the original history and replace it with the mutated history.
    self.readline.clear_history()
    for history_item in history_items:
      self.readline.add_history(history_item)
  def default(self, args):
    """Any input that is not a recognized shell command is sent to the
    server as a query."""
    query = self.imp_client.create_beeswax_query(args, self.set_query_options)
    return self._execute_stmt(query)
  def emptyline(self):
    """If an empty line is entered, do nothing"""
    # Overrides cmd.Cmd's default behavior of repeating the last command.
  def do_version(self, args):
    """Prints the Impala build version"""
    print_to_stderr("Shell version: %s" % VERSION_STRING)
    print_to_stderr("Server version: %s" % self.server_version)
def completenames(self, text, *ignored):
"""Make tab completion of commands case agnostic
Override the superclass's completenames() method to support tab completion for
upper case and mixed case commands.
"""
cmd_names = [cmd for cmd in self.commands if cmd.startswith(text.lower())]
# If the user input is upper case, return commands in upper case.
if text.isupper(): return [cmd_names.upper() for cmd_names in cmd_names]
# If the user input is lower case or mixed case, return lower case commands.
return cmd_names
# Tips shown in the welcome banner and by the TIP command.
TIPS=[
  "Press TAB twice to see a list of available commands.",
  "After running a query, type SUMMARY to see a summary of where time was spent.",
  "The SET command shows the current value of all shell and query options.",
  "To see live updates on a query's progress, run 'set LIVE_SUMMARY=1;'.",
  "To see a summary of a query's progress that updates in real-time, run 'set \
LIVE_PROGRESS=1;'.",
  "The HISTORY command lists all shell commands in chronological order.",
  "The '-B' command line flag turns off pretty-printing for query results. Use this flag \
to remove formatting from results you want to save for later, or to benchmark Impala.",
  "You can run a single query from the command line using the '-q' option.",
  "When pretty-printing is disabled, you can use the '--output_delimiter' flag to set \
the delimiter for fields in the same row. The default is ','.",
  "Run the PROFILE command after a query has finished to see a comprehensive summary of \
all the performance and diagnostic information that Impala gathered for that query. Be \
warned, it can be very long!",
  "To see more tips, run the TIP command.",
  "Every command must be terminated by a ';'.",
  "Want to know what version of Impala you're connected to? Run the VERSION command to \
find out!",
  # BUG FIX: a missing comma here previously merged this tip with the next one
  # via implicit string-literal concatenation.
  "You can change the Impala daemon that you're connected to by using the CONNECT \
command.",
  "To see how Impala will plan to run your query without actually executing it, use the \
EXPLAIN command. You can change the level of detail in the EXPLAIN output by setting the \
EXPLAIN_LEVEL query option.",
  "When you set a query option it lasts for the duration of the Impala shell session."
]
HEADER_DIVIDER =\
"***********************************************************************************"
def _format_tip(tip):
"""Takes a tip string and splits it on word boundaries so that it fits neatly inside the
shell header."""
return '\n'.join([l for l in textwrap.wrap(tip, len(HEADER_DIVIDER))])
# Banner printed at interactive startup; embeds the shell version and a
# randomly chosen tip (picked once, at import time).
WELCOME_STRING = """\
***********************************************************************************
Welcome to the Impala shell. Copyright (c) 2015 Cloudera, Inc. All rights reserved.
(%s)
%s
***********************************************************************************\
""" \
  % (VERSION_STRING, _format_tip(random.choice(TIPS)))
def print_to_stderr(message):
  # Diagnostics go to stderr so stdout stays clean for query results.
  print >> sys.stderr, message
def parse_query_text(query_text, utf8_encode_policy='strict'):
  """Parse query file text to extract queries and encode into utf-8"""
  # utf8_encode_policy is passed straight to unicode.encode (e.g. 'strict',
  # 'ignore', 'replace').
  return [q.encode('utf-8', utf8_encode_policy) for q in sqlparse.split(query_text)]
def parse_variables(keyvals):
  """Parse variable assignments passed as arguments in the command line.

  Each entry must look like KEY=VALUE (keys matching VALID_VAR_NAME_PATTERN,
  upper-cased in the result). Exits the shell on a malformed entry.
  """
  kv_pattern = r'(%s)=(.*)$' % (ImpalaShell.VALID_VAR_NAME_PATTERN,)
  vars = {}
  if keyvals:
    for keyval in keyvals:
      match = re.match(kv_pattern, keyval)
      if not match:
        # BUG FIX: '%' binds tighter than '+', so the old code applied the
        # format args to the second literal (which has no conversion
        # specifier), raising TypeError instead of printing this message.
        print_to_stderr('Error: Could not parse key-value "%s". '
                        'It must follow the pattern "KEY=VALUE".' % (keyval,))
        parser.print_help()
        sys.exit(1)
      else:
        vars[match.groups()[0].upper()] = match.groups()[1]
  return vars
def execute_queries_non_interactive_mode(options):
  """Run queries in non-interactive mode.

  Queries come either from a file (options.query_file, '-' meaning stdin)
  or directly from options.query. Exits the process on failure unless
  --ignore_query_failure is set.
  """
  queries = []
  if options.query_file:
    try:
      # "-" here signifies input from STDIN
      if options.query_file == "-":
        query_file_handle = sys.stdin
      else:
        query_file_handle = open(options.query_file, 'r')
      queries = parse_query_text(query_file_handle.read())
      if query_file_handle != sys.stdin:
        query_file_handle.close()
    except Exception, e:
      print_to_stderr('Error: %s' % e)
      sys.exit(1)
  elif options.query:
    queries = parse_query_text(options.query)
  shell = ImpalaShell(options)
  # The impalad was specified on the command line and the connection failed.
  # Return with an error, no need to process the query.
  if options.impalad and shell.imp_client.connected == False:
    sys.exit(1)
  # Commands queued during startup (e.g. 'invalidate metadata', 'use db')
  # run before the user's queries.
  queries = shell.cmdqueue + queries
  # Deal with case.
  sanitized_queries = []
  for query in queries:
    sanitized_queries.append(shell.sanitise_input(query))
  for query in sanitized_queries:
    # check if an error was encountered
    if shell.onecmd(query) is CmdStatus.ERROR:
      print_to_stderr('Could not execute command: %s' % query)
      if not options.ignore_query_failure:
        sys.exit(1)
if __name__ == "__main__":
  # pass defaults into option parser
  parser = get_option_parser(impala_shell_defaults)
  options, args = parser.parse_args()
  # use path to file specified by user in config_file option
  user_config = os.path.expanduser(options.config_file);
  # by default, use the .impalarc in the home directory
  config_to_load = impala_shell_defaults.get("config_file")
  # verify user_config, if found
  if os.path.isfile(user_config) and user_config != config_to_load:
    if options.verbose:
      print_to_stderr("Loading in options from config file: %s \n" % user_config)
    # Command line overrides loading ~/.impalarc
    config_to_load = user_config
  elif user_config != config_to_load:
    # A config file was explicitly requested but does not exist.
    print_to_stderr('%s not found.\n' % user_config)
    sys.exit(1)
  # default options loaded in from impala_shell_config_defaults.py
  # options defaults overwritten by those in config file
  try:
    impala_shell_defaults.update(get_config_from_file(config_to_load))
  except Exception, e:
    msg = "Unable to read configuration file correctly. Check formatting: %s\n" % e
    print_to_stderr(msg)
    sys.exit(1)
  # Re-parse so command-line flags take precedence over config-file values.
  parser = get_option_parser(impala_shell_defaults)
  options, args = parser.parse_args()
  # Arguments that could not be parsed are stored in args. Print an error and exit.
  if len(args) > 0:
    print_to_stderr('Error, could not parse arguments "%s"' % (' ').join(args))
    parser.print_help()
    sys.exit(1)
  if options.version:
    print VERSION_STRING
    sys.exit(0)
  if options.use_kerberos and options.use_ldap:
    print_to_stderr("Please specify at most one authentication mechanism (-k or -l)")
    sys.exit(1)
  if not options.ssl and not options.creds_ok_in_clear and options.use_ldap:
    print_to_stderr("LDAP credentials may not be sent over insecure " +
                    "connections. Enable SSL or set --auth_creds_ok_in_clear")
    sys.exit(1)
  if options.use_kerberos:
    print_to_stderr("Starting Impala Shell using Kerberos authentication")
    print_to_stderr("Using service name '%s'" % options.kerberos_service_name)
    # Check if the user has a ticket in the credentials cache
    try:
      if call(['klist', '-s']) != 0:
        print_to_stderr(("-k requires a valid kerberos ticket but no valid kerberos "
                         "ticket found."))
        sys.exit(1)
    except OSError, e:
      print_to_stderr('klist not found on the system, install kerberos clients')
      sys.exit(1)
  elif options.use_ldap:
    print_to_stderr("Starting Impala Shell using LDAP-based authentication")
  else:
    print_to_stderr("Starting Impala Shell without Kerberos authentication")
  # The LDAP password may be produced by an external command
  # (--ldap_password_cmd); any failure there is fatal.
  options.ldap_password = None
  if options.use_ldap and options.ldap_password_cmd:
    try:
      p = subprocess.Popen(shlex.split(options.ldap_password_cmd), stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)
      options.ldap_password, stderr = p.communicate()
      if p.returncode != 0:
        print_to_stderr("Error retrieving LDAP password (command was '%s', error was: "
                        "'%s')" % (options.ldap_password_cmd, stderr.strip()))
        sys.exit(1)
    except Exception, e:
      print_to_stderr("Error retrieving LDAP password (command was: '%s', exception "
                      "was: '%s')" % (options.ldap_password_cmd, e))
      sys.exit(1)
  if options.ssl:
    if options.ca_cert is None:
      print_to_stderr("SSL is enabled. Impala server certificates will NOT be verified"\
                      " (set --ca_cert to change)")
    else:
      print_to_stderr("SSL is enabled")
  if options.output_file:
    try:
      # Make sure the given file can be opened for writing. This will also clear the file
      # if successful.
      open(options.output_file, 'wb')
    except IOError, e:
      print_to_stderr('Error opening output file for writing: %s' % e)
      sys.exit(1)
  options.variables = parse_variables(options.keyval)
  if options.query or options.query_file:
    # Batch mode: live progress/summary only make sense interactively.
    if options.print_progress or options.print_summary:
      print_to_stderr("Error: Live reporting is available for interactive mode only.")
      sys.exit(1)
    execute_queries_non_interactive_mode(options)
    sys.exit(0)
intro = WELCOME_STRING
if not options.ssl and options.creds_ok_in_clear and options.use_ldap:
intro += ("\n\\nLDAP authentication is enabled, but the connection to Impala is " +
"not secured by TLS.\nALL PASSWORDS WILL BE SENT IN THE CLEAR TO IMPALA.\n")
  shell = ImpalaShell(options)
  # Outer loop keeps the interactive shell alive across recoverable errors
  # (Ctrl-C, socket hiccups, lost connections) until the user quits.
  while shell.is_alive:
    try:
      try:
        shell.cmdloop(intro)
      except KeyboardInterrupt:
        intro = '\n'
      # A last measure against any exceptions thrown by an rpc
      # not caught in the shell
      except socket.error, (code, e):
        # if the socket was interrupted, reconnect the connection with the client
        if code == errno.EINTR:
          print shell.CANCELLATION_MESSAGE
          shell._reconnect_cancellation()
        else:
          print_to_stderr("Socket error %s: %s" % (code, e))
          shell.imp_client.connected = False
          shell.prompt = shell.DISCONNECTED_PROMPT
      except DisconnectedException, e:
        # the client has lost the connection
        print_to_stderr(e)
        shell.imp_client.connected = False
        shell.prompt = shell.DISCONNECTED_PROMPT
      except QueryStateException, e:
        # an exception occurred while executing the query
        shell.imp_client.close_query(shell.last_query_handle,
                                     shell.query_handle_closed)
        print_to_stderr(e)
      except RPCException, e:
        # could not complete the rpc successfully
        print_to_stderr(e)
    finally:
      # Only show the welcome banner on the first pass through the loop.
      intro = ''
| apache-2.0 |
iuliat/nova | nova/tests/unit/scheduler/weights/test_weights_ram.py | 73 | 4062 | # Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler RAM weights.
"""
from nova.scheduler import weights
from nova.scheduler.weights import ram
from nova import test
from nova.tests.unit.scheduler import fakes
class RamWeigherTestCase(test.NoDBTestCase):
    """Exercises the RAM weigher under different ram_weight_multiplier values."""

    def setUp(self):
        super(RamWeigherTestCase, self).setUp()
        self.weight_handler = weights.HostWeightHandler()
        self.weighers = [ram.RAMWeigher()]

    def _get_weighed_host(self, hosts, weight_properties=None):
        # Returns the top-weighed host for the given host list.
        if weight_properties is None:
            weight_properties = {}
        return self.weight_handler.get_weighed_objects(self.weighers,
                hosts, weight_properties)[0]

    def _get_all_hosts(self):
        # Four fake hosts with strictly increasing free RAM.
        host_values = [
            ('host1', 'node1', {'free_ram_mb': 512}),
            ('host2', 'node2', {'free_ram_mb': 1024}),
            ('host3', 'node3', {'free_ram_mb': 3072}),
            ('host4', 'node4', {'free_ram_mb': 8192})
        ]
        return [fakes.FakeHostState(host, node, values)
                for host, node, values in host_values]

    def test_default_of_spreading_first(self):
        hostinfo_list = self._get_all_hosts()

        # host1: free_ram_mb=512
        # host2: free_ram_mb=1024
        # host3: free_ram_mb=3072
        # host4: free_ram_mb=8192

        # so, host4 should win:
        weighed_host = self._get_weighed_host(hostinfo_list)
        self.assertEqual(1.0, weighed_host.weight)
        self.assertEqual('host4', weighed_host.obj.host)

    def test_ram_filter_multiplier1(self):
        # A multiplier of zero flattens all weights to 0.
        self.flags(ram_weight_multiplier=0.0)
        hostinfo_list = self._get_all_hosts()

        # host1: free_ram_mb=512
        # host2: free_ram_mb=1024
        # host3: free_ram_mb=3072
        # host4: free_ram_mb=8192

        # We do not know the host, all have same weight.
        weighed_host = self._get_weighed_host(hostinfo_list)
        self.assertEqual(0.0, weighed_host.weight)

    def test_ram_filter_multiplier2(self):
        # The multiplier scales the normalized weight linearly.
        self.flags(ram_weight_multiplier=2.0)
        hostinfo_list = self._get_all_hosts()

        # host1: free_ram_mb=512
        # host2: free_ram_mb=1024
        # host3: free_ram_mb=3072
        # host4: free_ram_mb=8192

        # so, host4 should win:
        weighed_host = self._get_weighed_host(hostinfo_list)
        self.assertEqual(1.0 * 2, weighed_host.weight)
        self.assertEqual('host4', weighed_host.obj.host)

    def test_ram_filter_negative(self):
        # Hosts can report negative free RAM (overcommit); they should sort last.
        self.flags(ram_weight_multiplier=1.0)
        hostinfo_list = self._get_all_hosts()
        host_attr = {'id': 100, 'memory_mb': 8192, 'free_ram_mb': -512}
        host_state = fakes.FakeHostState('negative', 'negative', host_attr)
        hostinfo_list = list(hostinfo_list) + [host_state]

        # host1: free_ram_mb=512
        # host2: free_ram_mb=1024
        # host3: free_ram_mb=3072
        # host4: free_ram_mb=8192
        # negativehost: free_ram_mb=-512

        # so, host4 should win
        weights = self.weight_handler.get_weighed_objects(self.weighers,
                                                          hostinfo_list, {})
        weighed_host = weights[0]
        self.assertEqual(1, weighed_host.weight)
        self.assertEqual('host4', weighed_host.obj.host)

        # and negativehost should lose
        weighed_host = weights[-1]
        self.assertEqual(0, weighed_host.weight)
        self.assertEqual('negative', weighed_host.obj.host)
| apache-2.0 |
catapult-project/catapult | tracing/tracing/proto/histogram_proto_unittest.py | 4 | 1578 | # Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from tracing.proto import histogram_proto
class HistogramProtoUnittest(unittest.TestCase):
    """Checks unit string <-> proto UnitAndDirection conversions."""

    def testUnitFromProto(self):
        # UnitFromProto renders '<unit>_<direction>'; when no improvement
        # direction is set, only the unit name is produced (sigma case).
        proto = histogram_proto.Pb2()

        proto_unit = proto.UnitAndDirection()
        proto_unit.unit = proto.N_PERCENT
        proto_unit.improvement_direction = proto.BIGGER_IS_BETTER

        proto_unit2 = proto.UnitAndDirection()
        proto_unit2.unit = proto.BYTES_PER_SECOND
        proto_unit2.improvement_direction = proto.SMALLER_IS_BETTER

        proto_unit3 = proto.UnitAndDirection()
        proto_unit3.unit = proto.SIGMA

        self.assertEqual('n%_biggerIsBetter',
                         histogram_proto.UnitFromProto(proto_unit))
        self.assertEqual('bytesPerSecond_smallerIsBetter',
                         histogram_proto.UnitFromProto(proto_unit2))
        self.assertEqual('sigma',
                         histogram_proto.UnitFromProto(proto_unit3))

    def testProtoFromUnit(self):
        # ProtoFromUnit parses the optional '_<direction>' suffix back into
        # the improvement_direction enum.
        proto = histogram_proto.Pb2()

        unit1 = histogram_proto.ProtoFromUnit('count_biggerIsBetter')
        unit2 = histogram_proto.ProtoFromUnit('Hz_smallerIsBetter')
        unit3 = histogram_proto.ProtoFromUnit('unitless')

        self.assertEqual(unit1.unit, proto.COUNT)
        self.assertEqual(unit1.improvement_direction, proto.BIGGER_IS_BETTER)
        self.assertEqual(unit2.unit, proto.HERTZ)
        self.assertEqual(unit2.improvement_direction, proto.SMALLER_IS_BETTER)
        self.assertEqual(unit3.unit, proto.UNITLESS)
| bsd-3-clause |
franky88/emperioanimesta | env/Lib/site-packages/django/conf/locale/en_GB/formats.py | 504 | 2117 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Display formats (Django date-format syntax); en_GB uses day-first ordering.
DATE_FORMAT = 'j M Y'              # '25 Oct 2006'
TIME_FORMAT = 'P'                  # '2:30 p.m.'
DATETIME_FORMAT = 'j M Y, P'       # '25 Oct 2006, 2:30 p.m.'
YEAR_MONTH_FORMAT = 'F Y'          # 'October 2006'
MONTH_DAY_FORMAT = 'j F'           # '25 October'
SHORT_DATE_FORMAT = 'd/m/Y'        # '25/10/2006'
SHORT_DATETIME_FORMAT = 'd/m/Y P'  # '25/10/2006 2:30 p.m.'
FIRST_DAY_OF_WEEK = 1              # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%d/%m/%Y', '%d/%m/%y',             # '25/10/2006', '25/10/06'
    # %b/%B forms are disabled; they are locale-dependent in strptime.
    # '%b %d %Y', '%b %d, %Y',          # 'Oct 25 2006', 'Oct 25, 2006'
    # '%d %b %Y', '%d %b, %Y',          # '25 Oct 2006', '25 Oct, 2006'
    # '%B %d %Y', '%B %d, %Y',          # 'October 25 2006', 'October 25, 2006'
    # '%d %B %Y', '%d %B, %Y',          # '25 October 2006', '25 October, 2006'
]
DATETIME_INPUT_FORMATS = [
    '%Y-%m-%d %H:%M:%S',     # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f',  # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M',        # '2006-10-25 14:30'
    '%Y-%m-%d',              # '2006-10-25'
    '%d/%m/%Y %H:%M:%S',     # '25/10/2006 14:30:59'
    '%d/%m/%Y %H:%M:%S.%f',  # '25/10/2006 14:30:59.000200'
    '%d/%m/%Y %H:%M',        # '25/10/2006 14:30'
    '%d/%m/%Y',              # '25/10/2006'
    '%d/%m/%y %H:%M:%S',     # '25/10/06 14:30:59'
    '%d/%m/%y %H:%M:%S.%f',  # '25/10/06 14:30:59.000200'
    '%d/%m/%y %H:%M',        # '25/10/06 14:30'
    '%d/%m/%y',              # '25/10/06'
]

# Number formatting: '1,234,567.89'
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
| gpl-3.0 |
omaciel/billreminder | src/daemon/dbus_manager.py | 2 | 7091 | # -*- coding: utf-8 -*-
__all__ = ['Server', 'get_interface', 'verify_service']
from datetime import date
import dbus
import dbus.service
from lib import common, scheduler
from lib.utils import force_string
from lib.utils import get_dbus_interface as get_interface
from lib.utils import verify_dbus_service as verify_service
from lib import i18n
# Set up DBus event loop
try:
# dbus-python 0.80 and later
from dbus.mainloop.glib import DBusGMainLoop
DBusGMainLoop(set_as_default=True)
except ImportError:
# dbus-python prior to 0.80
import dbus.glib
class Server(dbus.service.Object):
""" DBus Service """
    def __init__(self, parent):
        """Export this object on the session bus under BillReminder's
        well-known name and object path."""
        self.parent = parent
        self.actions = self.parent.actions
        # Start DBus support
        self.__session_bus = dbus.SessionBus()
        self.__bus_name = dbus.service.BusName(common.DBUS_INTERFACE,
                                               bus=self.__session_bus)
        dbus.service.Object.__init__(self, self.__bus_name, common.DBUS_PATH)
# DBus Methods (Called via DBus Service)
    @dbus.service.method(common.DBUS_INTERFACE, out_signature='s')
    def hello(self):
        """Identify the service: returns a localized name/version string."""
        return _('This is %(appname)s - Version: %(version)s') % \
            {'appname': _("BillReminder Notifier"),
             'version': common.APPVERSION}
@dbus.service.method(common.DBUS_INTERFACE, out_signature='b')
def quit(self):
self.parent.quit()
return True
@dbus.service.method(common.DBUS_INTERFACE, in_signature='i', out_signature='i')
def register(self, pid):
self.parent.client_pid = pid
return self.parent.client_pid
@dbus.service.method(common.DBUS_INTERFACE, in_signature='iii', out_signature='aa{ss}')
def get_interval_bills(self, start, end, paid):
ret = []
records = self.actions.get_interval_bills(date.fromordinal(start), date.fromordinal(end), paid)
for record in records:
ret.append(force_string(record.__dict__))
return ret
@dbus.service.method(common.DBUS_INTERFACE, in_signature='iii', out_signature='aa{ss}')
def get_alarm_bills(self, start, end, paid):
ret = []
records = self.actions.get_alarm_bills(date.fromordinal(start), date.fromordinal(end), paid)
for record in records:
ret.append(force_string(record))
return ret
@dbus.service.method(common.DBUS_INTERFACE, in_signature='iii', out_signature='a(sis)')
def get_monthly_totals(self, start, end, paid):
# Return a list of categories and totals for the given month
ret = []
records = self.actions.get_monthly_totals(date.fromordinal(start), date.fromordinal(end), paid)
for record in records:
ret.append(record)
return ret
@dbus.service.method(common.DBUS_INTERFACE, in_signature='iii', out_signature='aa{ss}')
def get_monthly_bills(self, month, year, paid):
ret = []
records = self.actions.get_interval_bills(month, year, paid)
for record in records:
ret.append(force_string(record))
return ret
@dbus.service.method(common.DBUS_INTERFACE, in_signature='a{ss}', out_signature='aa{ss}')
def get_bills(self, kwargs):
""" Returns one or more records that meet the criteria passed """
ret = []
records = self.actions.get_bills(kwargs)
for record in records:
ret.append(force_string(record))
return ret
@dbus.service.method(common.DBUS_INTERFACE, in_signature='s', out_signature='aa{ss}')
def get_bills_(self, kwargs):
""" Returns one or more records that meet the criteria passed """
ret = []
records = self.actions.get_bills(kwargs)
for record in records:
ret.append(force_string(record))
return ret
@dbus.service.method(common.DBUS_INTERFACE, in_signature='a{ss}', out_signature='aa{ss}')
def get_categories(self, kwargs):
""" Returns one or more records that meet the criteria passed """
print 'get_categories'
ret = []
records = self.actions.get_categories(**kwargs)
print records
for record in records:
ret.append(force_string(record))
return ret
@dbus.service.method(common.DBUS_INTERFACE, in_signature='s', out_signature='aa{ss}')
def get_categories_(self, kwargs):
""" Returns one or more records that meet the criteria passed """
ret = []
records = self.actions.get_categories(kwargs)
for record in records:
ret.append(force_string(record))
return ret
@dbus.service.method(common.DBUS_INTERFACE, in_signature='a{ss}', out_signature='a{ss}')
def edit(self, kwargs):
""" Edit a record in the database """
ret = self.actions.edit(kwargs)
if ret:
self.bill_edited(ret)
return force_string(ret)
@dbus.service.method(common.DBUS_INTERFACE, in_signature='a{ss}', out_signature='a{ss}')
def add(self, kwargs):
""" Add a record to the database """
ret = self.actions.add(kwargs)
if ret:
self.bill_added(kwargs)
return force_string(ret)
@dbus.service.method(common.DBUS_INTERFACE, in_signature='a{ss}', out_signature='b')
def delete(self, kwargs):
""" Delete a record in the database """
ret = self.actions.delete(kwargs)
if ret:
self.bill_deleted(kwargs)
return ret
@dbus.service.method(common.DBUS_INTERFACE, in_signature='a{ss}')
def set_tray_hints(self, hints):
# Set tray icon hints
hints['x'] = int(hints['x'])
hints['y'] = int(hints['y'])
self.parent.alarm.tray_hints = hints
@dbus.service.method(common.DBUS_INTERFACE, out_signature='s')
def get_notification_message(self):
return self.parent.alarm.show_pay_notification(show=False)
@dbus.service.method(common.DBUS_INTERFACE, in_signature='ss', out_signature='b')
def show_message(self, title, msg):
self.parent.alarm.show_notification(title, msg)
return True
# DBus Signals
@dbus.service.signal(common.DBUS_INTERFACE, signature='a{ss}')
def bill_added(self, kwargs):
print 'Signal Emmited: bill_added'
@dbus.service.signal(common.DBUS_INTERFACE, signature='a{ss}')
def bill_edited(self, kwargs):
print 'Signal Emmited: bill_edited'
@dbus.service.signal(common.DBUS_INTERFACE, signature='i')
def bill_deleted(self, key):
print 'Signal Emmited: bill_deleted'
@dbus.service.signal(common.DBUS_INTERFACE, signature='ssis')
def show_notification(self, title, body, timeout, icon):
print 'Signal Emmited: show_notification'
@dbus.service.signal(common.DBUS_INTERFACE, signature='sss')
def show_alert(self, title, body, type_):
print 'Signal Emmited: show_alert'
@dbus.service.signal(common.DBUS_INTERFACE)
def show_main_window(self):
print 'Signal Emmited: show_main_window'
| gpl-3.0 |
MicroTrustRepos/microkernel | src/l4/pkg/python/contrib/Lib/test/test_bsddb185.py | 194 | 1258 | """Tests for the bsddb185 module.
The file 185test.db found in Lib/test/ is for testing purposes with this
testing suite.
"""
from test.test_support import run_unittest, findfile, import_module
import unittest
bsddb185 = import_module('bsddb185', deprecated=True)
import anydbm
import whichdb
import os
import tempfile
import shutil
class Bsddb185Tests(unittest.TestCase):
    """Exercise the legacy bsddb185 module against a known hash-v2 file."""

    def test_open_existing_hash(self):
        # The bundled 185test.db fixture must open as a hash database
        # and map the key "1" to the value "1".
        handle = bsddb185.hashopen(findfile("185test.db"))
        self.assertEqual(handle["1"], "1")
        handle.close()

    def test_whichdb(self):
        # whichdb must sniff the fixture as a bsddb185 hash file.
        detected = whichdb.whichdb(findfile("185test.db"))
        self.assertEqual(detected, "bsddb185")

    def test_anydbm_create(self):
        # A database freshly created through anydbm must NOT come out
        # in the legacy bsddb185 format.
        workdir = tempfile.mkdtemp()
        try:
            db_path = os.path.join(workdir, "foo.db")
            anydbm.open(db_path, "c").close()
            self.assertNotEqual(whichdb.whichdb(db_path), "bsddb185")
        finally:
            shutil.rmtree(workdir)
def test_main():
    """Run the whole Bsddb185Tests suite via the test_support driver."""
    run_unittest(Bsddb185Tests)


if __name__ == "__main__":
    test_main()
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.