repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
recht/raven-python | tests/contrib/django/urls.py | 19 | 1367 | from __future__ import absolute_import
from django.conf import settings
try:
from django.conf.urls import url, patterns
except ImportError:
# for Django version less than 1.4
from django.conf.urls.defaults import url, patterns # NOQA
from django.http import HttpResponse
def handler404(request):
    # Minimal 404 handler for the test URLconf: empty body, explicit 404 status.
    return HttpResponse('', status=404)
def handler500(request):
    # Test 500 handler. When the BREAK_THAT_500 setting is truthy it raises,
    # letting the suite exercise the case where the error handler itself fails.
    if getattr(settings, 'BREAK_THAT_500', False):
        raise ValueError('handler500')
    return HttpResponse('', status=500)
# URLconf for the Django contrib tests: each route points at a view in
# tests.contrib.django.views that triggers one specific error/logging path.
# Uses the old string-based view references + patterns() API (pre-Django 1.8).
urlpatterns = patterns('',
    url(r'^no-error$', 'tests.contrib.django.views.no_error', name='sentry-no-error'),
    url(r'^fake-login$', 'tests.contrib.django.views.fake_login', name='sentry-fake-login'),
    url(r'^trigger-500$', 'tests.contrib.django.views.raise_exc', name='sentry-raise-exc'),
    url(r'^trigger-500-ioerror$', 'tests.contrib.django.views.raise_ioerror', name='sentry-raise-ioerror'),
    url(r'^trigger-500-decorated$', 'tests.contrib.django.views.decorated_raise_exc', name='sentry-raise-exc-decor'),
    url(r'^trigger-500-django$', 'tests.contrib.django.views.django_exc', name='sentry-django-exc'),
    url(r'^trigger-500-template$', 'tests.contrib.django.views.template_exc', name='sentry-template-exc'),
    url(r'^trigger-500-log-request$', 'tests.contrib.django.views.logging_request_exc', name='sentry-log-request-exc'),
)
| bsd-3-clause |
Xowap/ansible | lib/ansible/executor/process/worker.py | 30 | 6205 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six.moves import queue
import multiprocessing
import os
import signal
import sys
import time
import traceback
HAS_ATFORK=True
try:
from Crypto.Random import atfork
except ImportError:
HAS_ATFORK=False
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.executor.task_executor import TaskExecutor
from ansible.executor.task_result import TaskResult
from ansible.playbook.handler import Handler
from ansible.playbook.task import Task
from ansible.utils.debug import debug
__all__ = ['WorkerProcess']
class WorkerProcess(multiprocessing.Process):
    '''
    The worker thread class, which uses TaskExecutor to run tasks
    read from a job queue and pushes results into a results queue
    for reading later.

    NOTE(review): this module uses Python 2-only syntax (``except OSError, e:``)
    and will not parse under Python 3.
    '''

    def __init__(self, tqm, main_q, rslt_q, loader):
        # takes a task queue manager as the sole param:
        self._main_q = main_q
        self._rslt_q = rslt_q
        self._loader = loader

        # dupe stdin, if we have one
        self._new_stdin = sys.stdin
        try:
            fileno = sys.stdin.fileno()
            if fileno is not None:
                try:
                    self._new_stdin = os.fdopen(os.dup(fileno))
                except OSError, e:
                    # couldn't dupe stdin, most likely because it's
                    # not a valid file descriptor, so we just rely on
                    # using the one that was passed in
                    pass
        except ValueError:
            # couldn't get stdin's fileno, so we just carry on
            pass

        super(WorkerProcess, self).__init__()

    def run(self):
        '''
        Called when the process is started, and loops indefinitely
        until an error is encountered (typically an IOerror from the
        queue pipe being disconnected). During the loop, we attempt
        to pull tasks off the job queue and run them, pushing the result
        onto the results queue. We also remove the host from the blocked
        hosts list, to signify that they are ready for their next task.
        '''

        if HAS_ATFORK:
            # Re-initialize PyCrypto's RNG state after the fork -- presumably
            # so parent and child don't share RNG state (see Crypto.Random.atfork).
            atfork()

        while True:
            # task is reset each iteration so the exception handlers below can
            # tell whether a task was actually dequeued before the failure.
            task = None
            try:
                if not self._main_q.empty():
                    debug("there's work to be done!")
                    (host, task, basedir, job_vars, play_context, shared_loader_obj) = self._main_q.get(block=False)
                    debug("got a task/handler to work on: %s" % task)

                    # because the task queue manager starts workers (forks) before the
                    # playbook is loaded, set the basedir of the loader inherted by
                    # this fork now so that we can find files correctly
                    self._loader.set_basedir(basedir)

                    # Serializing/deserializing tasks does not preserve the loader attribute,
                    # since it is passed to the worker during the forking of the process and
                    # would be wasteful to serialize. So we set it here on the task now, and
                    # the task handles updating parent/child objects as needed.
                    task.set_loader(self._loader)

                    # apply the given task's information to the connection info,
                    # which may override some fields already set by the play or
                    # the options specified on the command line
                    new_play_context = play_context.set_task_and_variable_override(task=task, variables=job_vars)

                    # execute the task and build a TaskResult from the result
                    debug("running TaskExecutor() for %s/%s" % (host, task))
                    executor_result = TaskExecutor(host, task, job_vars, new_play_context, self._new_stdin, self._loader, shared_loader_obj).run()
                    debug("done running TaskExecutor() for %s/%s" % (host, task))
                    task_result = TaskResult(host, task, executor_result)

                    # put the result on the result queue
                    debug("sending task result")
                    self._rslt_q.put(task_result, block=False)
                    debug("done sending task result")
                else:
                    # nothing queued; back off briefly instead of busy-waiting
                    time.sleep(0.1)
            except queue.Empty:
                pass
            except (IOError, EOFError, KeyboardInterrupt):
                # queue pipe disconnected or user interrupt: leave the loop
                break
            except AnsibleConnectionFailure:
                try:
                    if task:
                        task_result = TaskResult(host, task, dict(unreachable=True))
                        self._rslt_q.put(task_result, block=False)
                except:
                    # FIXME: most likely an abort, catch those kinds of errors specifically
                    break
            except Exception, e:
                debug("WORKER EXCEPTION: %s" % e)
                debug("WORKER EXCEPTION: %s" % traceback.format_exc())
                try:
                    if task:
                        task_result = TaskResult(host, task, dict(failed=True, exception=traceback.format_exc(), stdout=''))
                        self._rslt_q.put(task_result, block=False)
                except:
                    # FIXME: most likely an abort, catch those kinds of errors specifically
                    break

        debug("WORKER PROCESS EXITING")
| gpl-3.0 |
flavour/tldrmp | private/templates/default/menus.py | 4 | 4148 | # -*- coding: utf-8 -*-
from gluon import *
from s3 import *
from s3layouts import *
try:
from .layouts import *
except ImportError:
pass
import s3menus as default
# Below is an example on which you can base your own template's menus.py
# - there are also other examples in the other templates folders
# =============================================================================
#class S3MainMenu(default.S3MainMenu):
#"""
#Custom Application Main Menu:
#The main menu consists of several sub-menus, each of which can
#be customized separately as a method of this class. The overall
#composition of the menu is defined in the menu() method, which can
#be customized as well:
#Function Sub-Menu Access to (standard)
#menu_modules() the modules menu the Eden modules
#menu_gis() the GIS menu GIS configurations
#menu_admin() the Admin menu System/User Administration
#menu_lang() the Language menu Selection of the GUI locale
#menu_auth() the User menu Login, Logout, User Profile
#menu_help() the Help menu Contact page, About page
#The standard uses the MM layout class for main menu items - but you
#can of course use a custom layout class which you define in layouts.py.
#Additional sub-menus can simply be defined as additional functions in
#this class, and then be included in the menu() method.
#Each sub-menu function returns a list of menu items, only the menu()
#function must return a layout class instance.
#"""
# -------------------------------------------------------------------------
#@classmethod
#def menu(cls):
#""" Compose Menu """
#main_menu = MM()(
## Modules-menu, align-left
#cls.menu_modules(),
## Service menus, align-right
## Note: always define right-hand items in reverse order!
#cls.menu_help(right=True),
#cls.menu_auth(right=True),
#cls.menu_lang(right=True),
#cls.menu_admin(right=True),
#cls.menu_gis(right=True)
#)
#return main_menu
# -------------------------------------------------------------------------
#@classmethod
#def menu_modules(cls):
#""" Custom Modules Menu """
#return [
#homepage(),
#homepage("gis"),
#homepage("pr")(
#MM("Persons", f="person"),
#MM("Groups", f="group")
#),
#MM("more", link=False)(
#homepage("dvi"),
#homepage("irs")
#),
#]
# =============================================================================
#class S3OptionsMenu(default.S3OptionsMenu):
#"""
#Custom Controller Menus
#The options menu (left-hand options menu) is individual for each
#controller, so each controller has its own options menu function
#in this class.
#Each of these option menu functions can be customized separately,
#by simply overriding (re-defining) the default function. The
#options menu function must return an instance of the item layout.
#The standard menu uses the M item layout class, but you can of
#course also use any other layout class which you define in
#layouts.py (can also be mixed).
#Make sure additional helper functions in this class don't match
#any current or future controller prefix (e.g. by using an
#underscore prefix).
#"""
#def cr(self):
#""" CR / Shelter Registry """
#return M(c="cr")(
#M("Camp", f="shelter")(
#M("New", m="create"),
#M("List All"),
#M("Map", m="map"),
#M("Import", m="import"),
#)
#)
# END =========================================================================
| mit |
defaultnamehere/grr | lib/time_utils.py | 4 | 2416 | #!/usr/bin/env python
"""This file contains various utilities for datetime handling."""
import calendar
import datetime
import re
import time
# Special Windows value for 'the beginning of time': the FILETIME epoch,
# 1601-01-01 00:00:00 (used as the base for FILETIME conversions below).
NULL_FILETIME = datetime.datetime(1601, 1, 1, 0, 0, 0)

# Regex for times in windows wmi converted format 20080726084622.375000+120
TIME_WMI_RE = re.compile(r"(?P<date>\d{14})\."  # date then .
                         r"(?P<subsecond>\d{6})[+-]"  # secs then + or -
                         r"(?P<tzoffset>\d{3})")  # minute timezone offset
def DatetimeToWmiTime(dt):
  """Take a datetime tuple and return it as yyyymmddHHMMSS.mmmmmm+UUU string.

  Args:
    dt: A datetime object.

  Returns:
    A string in CMI_DATETIME format.

  http://www.dmtf.org/sites/default/files/standards/documents/DSP0004_2.5.0.pdf
  """
  td = dt.utcoffset()
  if td:
    # Floor division keeps the offset integral on Python 3 (plain "/" would
    # produce a float there); on Python 2 "//" is identical for ints.
    offset = (td.seconds + (td.days * 60 * 60 * 24)) // 60
    if offset >= 0:
      str_offset = "+%03d" % offset
    else:
      str_offset = "%03d" % offset
  else:
    # Naive datetimes are emitted with a zero UTC offset.
    str_offset = "+000"
  return u"%04d%02d%02d%02d%02d%02d.%06d%s" % (dt.year, dt.month, dt.day,
                                               dt.hour, dt.minute, dt.second,
                                               dt.microsecond, str_offset)
def WmiTimeToEpoch(cimdatetime_str):
  """Convert a CIM_DATETIME string to microseconds since epoch.

  Args:
    cimdatetime_str: A string in WMI format.

  Returns:
    Microseconds since epoch as int or 0 on failure.

  http://www.dmtf.org/sites/default/files/standards/documents/DSP0004_2.5.0.pdf
  """
  re_match = TIME_WMI_RE.match(cimdatetime_str)
  try:
    # AttributeError here means the regex did not match (re_match is None).
    t_dict = re_match.groupdict()
    flt_time = time.strptime(t_dict["date"], "%Y%m%d%H%M%S")
    epoch_time = int(calendar.timegm(flt_time)) * 1000000
    # Note that the tzoffset value is ignored, CIM_DATETIME stores in UTC
    epoch_time += int(t_dict["subsecond"])
    return epoch_time
  except (KeyError, AttributeError, ValueError):
    # ValueError: 14 digits that don't form a valid date (e.g. month 13)
    # raise from time.strptime; treat that as a failure like the others.
    return 0
def WinFileTimeToDateTime(filetime):
  """Take a Windows FILETIME as integer and convert to DateTime.

  Args:
    filetime: 100-nanosecond intervals since 1601-01-01 (Windows FILETIME).

  Returns:
    A datetime.datetime offset from NULL_FILETIME.
  """
  # Floor-divide so Python 3 doesn't produce a float (which would lose
  # precision for large FILETIME values) before building the timedelta;
  # identical to "/" under Python 2 integer division.
  return NULL_FILETIME + datetime.timedelta(microseconds=filetime // 10)
def AmericanDateToEpoch(date_str):
  """Take a US format date and return epoch. Used for some broken WMI calls.

  Args:
    date_str: Date as "mm/dd/yyyy".

  Returns:
    Microseconds since epoch as int, or 0 if the string doesn't parse.
  """
  try:
    parsed = time.strptime(date_str, "%m/%d/%Y")
  except ValueError:
    return 0
  return int(calendar.timegm(parsed)) * 1000000
| apache-2.0 |
pwmarcz/django | django/template/defaultfilters.py | 11 | 28360 | """Default variable filters."""
from __future__ import unicode_literals
import re
import random as random_module
from decimal import Decimal, InvalidOperation, Context, ROUND_HALF_UP
from functools import wraps
from pprint import pformat
import warnings
from django.template.base import Variable, Library, VariableDoesNotExist
from django.conf import settings
from django.utils import formats
from django.utils.dateformat import format, time_format
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text, iri_to_uri
from django.utils.html import (conditional_escape, escapejs,
escape, urlize as _urlize, linebreaks, strip_tags, avoid_wrapping,
remove_tags)
from django.utils.http import urlquote
from django.utils.text import Truncator, wrap, phone2numeric
from django.utils.safestring import mark_safe, SafeData, mark_for_escaping
from django.utils import six
from django.utils.timesince import timesince, timeuntil
from django.utils.translation import ugettext, ungettext
from django.utils.text import normalize_newlines, slugify as _slugify
register = Library()
#######################
# STRING DECORATOR #
#######################
def stringfilter(func):
    """
    Decorator for filters which should only receive unicode objects. The object
    passed as the first positional argument will be converted to a unicode
    object.
    """
    def _dec(*args, **kwargs):
        if args:
            args = list(args)
            # Coerce the filter's value argument to text before calling.
            args[0] = force_text(args[0])
            # If the input was already safe and the wrapped filter declares
            # itself safe, re-mark the coerced result as safe.
            if (isinstance(args[0], SafeData) and
                    getattr(_dec._decorated_function, 'is_safe', False)):
                return mark_safe(func(*args, **kwargs))
        return func(*args, **kwargs)

    # Include a reference to the real function (used to check original
    # arguments by the template parser, and to bear the 'is_safe' attribute
    # when multiple decorators are applied).
    _dec._decorated_function = getattr(func, '_decorated_function', func)

    return wraps(func)(_dec)
###################
# STRINGS #
###################
@register.filter(is_safe=True)
@stringfilter
def addslashes(value):
    """
    Prefixes backslashes and both quote characters with a backslash.
    Handy when embedding strings in CSV; for JavaScript prefer the
    ``escapejs`` filter.
    """
    # Backslashes must be doubled first so the quote escapes stay intact.
    for ch in ('\\', '"', "'"):
        value = value.replace(ch, '\\' + ch)
    return value
@register.filter(is_safe=True)
@stringfilter
def capfirst(value):
    """Uppercases only the first character of the value."""
    if not value:
        # Empty input passes through unchanged.
        return value
    return value[0].upper() + value[1:]
@register.filter("escapejs")
@stringfilter
def escapejs_filter(value):
"""Hex encodes characters for use in JavaScript strings."""
return escapejs(value)
# Values for testing floatformat input against infinity and NaN representations,
# which differ across platforms and Python versions. Some (i.e. old Windows
# ones) are not recognized by Decimal but we want to return them unchanged vs.
# returning an empty string as we do for completely invalid input. Note these
# need to be built up from values that are not inf/nan, since inf/nan values do
# not reload properly from .pyc files on Windows prior to some level of Python 2.5
# (see Python Issue757815 and Issue1080440).
pos_inf = 1e200 * 1e200
neg_inf = -1e200 * 1e200
# inf // inf is NaN; building it this way avoids writing a literal.
nan = (1e200 * 1e200) // (1e200 * 1e200)
special_floats = [str(pos_inf), str(neg_inf), str(nan)]
@register.filter(is_safe=True)
def floatformat(text, arg=-1):
    """
    Displays a float to a specified number of decimal places.

    If called without an argument, it displays the floating point number with
    one decimal place -- but only if there's a decimal place to be displayed:

    * num1 = 34.23234
    * num2 = 34.00000
    * num3 = 34.26000
    * {{ num1|floatformat }} displays "34.2"
    * {{ num2|floatformat }} displays "34"
    * {{ num3|floatformat }} displays "34.3"

    If arg is positive, it will always display exactly arg number of decimal
    places:

    * {{ num1|floatformat:3 }} displays "34.232"
    * {{ num2|floatformat:3 }} displays "34.000"
    * {{ num3|floatformat:3 }} displays "34.260"

    If arg is negative, it will display arg number of decimal places -- but
    only if there are places to be displayed:

    * {{ num1|floatformat:"-3" }} displays "34.232"
    * {{ num2|floatformat:"-3" }} displays "34"
    * {{ num3|floatformat:"-3" }} displays "34.260"

    If the input float is infinity or NaN, the (platform-dependent) string
    representation of that value will be displayed.
    """
    try:
        input_val = force_text(text)
        d = Decimal(input_val)
    except UnicodeEncodeError:
        return ''
    except InvalidOperation:
        # Not directly parseable as a Decimal. Platform inf/nan spellings
        # pass through unchanged; anything else gets one more chance via
        # float() before we give up and return ''.
        if input_val in special_floats:
            return input_val
        try:
            d = Decimal(force_text(float(text)))
        except (ValueError, InvalidOperation, TypeError, UnicodeEncodeError):
            return ''
    try:
        p = int(arg)
    except ValueError:
        return input_val

    try:
        # m is the (negated) fractional part; zero means d is integral.
        m = int(d) - d
    except (ValueError, OverflowError, InvalidOperation):
        return input_val

    # Integral value + negative arg: render as a plain localized integer.
    if not m and p < 0:
        return mark_safe(formats.number_format('%d' % (int(d)), 0))

    if p == 0:
        exp = Decimal(1)
    else:
        exp = Decimal('1.0') / (Decimal(10) ** abs(p))
    try:
        # Set the precision high enough to avoid an exception, see #15789.
        tupl = d.as_tuple()
        units = len(tupl[1]) - tupl[2]
        prec = abs(p) + units + 1

        # Avoid conversion to scientific notation by accessing `sign`, `digits`
        # and `exponent` from `Decimal.as_tuple()` directly.
        sign, digits, exponent = d.quantize(exp, ROUND_HALF_UP,
            Context(prec=prec)).as_tuple()
        digits = [six.text_type(digit) for digit in reversed(digits)]
        while len(digits) <= abs(exponent):
            digits.append('0')
        digits.insert(-exponent, '.')
        if sign:
            digits.append('-')
        number = ''.join(reversed(digits))
        return mark_safe(formats.number_format(number, abs(p)))
    except InvalidOperation:
        return input_val
@register.filter(is_safe=True)
@stringfilter
def iriencode(value):
    """Escapes an IRI value for use in a URL."""
    # iri_to_uri may return bytes; force back to text for template output.
    return force_text(iri_to_uri(value))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linenumbers(value, autoescape=None):
    """Prefixes every line of the text with its (zero-padded) line number."""
    lines = value.split('\n')
    # Pad the counter to the width of the largest line number.
    width = six.text_type(len(six.text_type(len(lines))))
    fmt = "%0" + width + "d. %s"
    if not autoescape or isinstance(value, SafeData):
        numbered = [fmt % (num, line) for num, line in enumerate(lines, 1)]
    else:
        numbered = [fmt % (num, escape(line)) for num, line in enumerate(lines, 1)]
    return mark_safe('\n'.join(numbered))
@register.filter(is_safe=True)
@stringfilter
def lower(value):
    """Lowercases the whole string."""
    lowered = value.lower()
    return lowered
@register.filter(is_safe=False)
@stringfilter
def make_list(value):
    """
    Returns the value turned into a list.

    For an integer, it's a list of digits.
    For a string, it's a list of characters.
    """
    # @stringfilter has already coerced value to text, so list() yields chars.
    return list(value)
@register.filter(is_safe=True)
@stringfilter
def slugify(value):
    """
    Converts to lowercase, removes non-word characters (alphanumerics and
    underscores) and converts spaces to hyphens. Also strips leading and
    trailing whitespace.
    """
    # Delegates to django.utils.text.slugify (imported as _slugify).
    return _slugify(value)
@register.filter(is_safe=True)
def stringformat(value, arg):
    """
    Applies a printf-style format (``arg``, without the leading "%") to the
    value and returns the result, or "" when formatting fails.

    See http://docs.python.org/lib/typesseq-strings.html for documentation
    of Python string formatting
    """
    try:
        spec = "%" + six.text_type(arg)
        return spec % value
    except (ValueError, TypeError):
        return ""
@register.filter(is_safe=True)
@stringfilter
def title(value):
    """Converts a string into titlecase."""
    # str.title() over-capitalizes after apostrophes ("they'Re") and digits
    # ("1St"); the two substitutions lower those back down. Raw strings keep
    # the regex escapes valid: "\d" (non-raw) is a deprecated escape sequence
    # from Python 3.6 onwards.
    t = re.sub(r"([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
    return re.sub(r"\d([A-Z])", lambda m: m.group(0).lower(), t)
@register.filter(is_safe=True)
@stringfilter
def truncatechars(value, arg):
    """
    Cuts the string down to ``arg`` characters; returns it unchanged when
    ``arg`` is not a valid integer.
    """
    try:
        num_chars = int(arg)
    except ValueError:
        # Invalid literal for int() -- fail silently.
        return value
    return Truncator(value).chars(num_chars)
@register.filter(is_safe=True)
@stringfilter
def truncatechars_html(value, arg):
    """
    Cuts HTML down to ``arg`` characters, keeping tags balanced and
    preserving newlines; returns it unchanged for a non-integer ``arg``.
    """
    try:
        num_chars = int(arg)
    except ValueError:
        # Invalid literal for int() -- fail silently.
        return value
    return Truncator(value).chars(num_chars, html=True)
@register.filter(is_safe=True)
@stringfilter
def truncatewords(value, arg):
    """
    Cuts the string down to ``arg`` words (newlines are removed); returns it
    unchanged for a non-integer ``arg``.
    """
    try:
        num_words = int(arg)
    except ValueError:
        # Invalid literal for int() -- fail silently.
        return value
    return Truncator(value).words(num_words, truncate=' ...')
@register.filter(is_safe=True)
@stringfilter
def truncatewords_html(value, arg):
    """
    Cuts HTML down to ``arg`` words, keeping tags balanced and preserving
    newlines; returns it unchanged for a non-integer ``arg``.
    """
    try:
        num_words = int(arg)
    except ValueError:
        # Invalid literal for int() -- fail silently.
        return value
    return Truncator(value).words(num_words, html=True, truncate=' ...')
@register.filter(is_safe=False)
@stringfilter
def upper(value):
    """Uppercases the whole string."""
    uppered = value.upper()
    return uppered
@register.filter(is_safe=False)
@stringfilter
def urlencode(value, safe=None):
    """
    Escapes a value for use in a URL.

    The optional ``safe`` argument names characters ``urlquote`` must leave
    unescaped. When omitted, Django's default safe set applies (pass an
    empty string to escape every character).
    """
    if safe is None:
        return urlquote(value)
    return urlquote(value, safe=safe)
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlize(value, autoescape=None):
    """Converts URLs in plain text into clickable links."""
    # nofollow=True adds rel="nofollow" to generated anchors.
    return mark_safe(_urlize(value, nofollow=True, autoescape=autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlizetrunc(value, limit, autoescape=None):
    """
    Converts URLs into clickable links, truncating URLs to the given character
    limit, and adding 'rel=nofollow' attribute to discourage spamming.

    Argument: Length to truncate URLs to.
    """
    linked = _urlize(value, trim_url_limit=int(limit), nofollow=True,
                     autoescape=autoescape)
    return mark_safe(linked)
@register.filter(is_safe=False)
@stringfilter
def wordcount(value):
    """Counts the whitespace-separated words in the string."""
    words = value.split()
    return len(words)
@register.filter(is_safe=True)
@stringfilter
def wordwrap(value, arg):
    """
    Wraps words at specified line length.

    Argument: number of characters to wrap the text at.
    """
    width = int(arg)
    return wrap(value, width)
@register.filter(is_safe=True)
@stringfilter
def ljust(value, arg):
    """
    Left-aligns the value in a field of a given width.

    Argument: field size.
    """
    width = int(arg)
    return value.ljust(width)
@register.filter(is_safe=True)
@stringfilter
def rjust(value, arg):
    """
    Right-aligns the value in a field of a given width.

    Argument: field size.
    """
    width = int(arg)
    return value.rjust(width)
@register.filter(is_safe=True)
@stringfilter
def center(value, arg):
    """Centers the value in a field of a given width."""
    width = int(arg)
    return value.center(width)
@register.filter
@stringfilter
def cut(value, arg):
    """
    Removes all values of arg from the given string.
    """
    safe = isinstance(value, SafeData)
    value = value.replace(arg, '')
    # Removing ';' can change the meaning of already-escaped HTML entities
    # (e.g. "&amp;" becoming "&amp"), so the result is only re-marked safe
    # for other arguments.
    if safe and arg != ';':
        return mark_safe(value)
    return value
###################
# HTML STRINGS #
###################
@register.filter("escape", is_safe=True)
@stringfilter
def escape_filter(value):
"""
Marks the value as a string that should not be auto-escaped.
"""
return mark_for_escaping(value)
@register.filter(is_safe=True)
@stringfilter
def force_escape(value):
    """
    Escapes a string's HTML. This returns a new string containing the escaped
    characters (as opposed to "escape", which marks the content for later
    possible escaping).
    """
    # Applies django.utils.html.escape immediately rather than deferring.
    return escape(value)
@register.filter("linebreaks", is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaks_filter(value, autoescape=None):
"""
Replaces line breaks in plain text with appropriate HTML; a single
newline becomes an HTML line break (``<br />``) and a new line
followed by a blank line becomes a paragraph break (``</p>``).
"""
autoescape = autoescape and not isinstance(value, SafeData)
return mark_safe(linebreaks(value, autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaksbr(value, autoescape=None):
    """
    Converts all newlines in a piece of plain text to HTML line breaks
    (``<br />``).
    """
    # Decide on escaping from the ORIGINAL value (its SafeData status),
    # before newline normalization replaces it.
    escape_needed = autoescape and not isinstance(value, SafeData)
    value = normalize_newlines(value)
    if escape_needed:
        value = escape(value)
    return mark_safe(value.replace('\n', '<br />'))
@register.filter(is_safe=True)
@stringfilter
def safe(value):
    """
    Marks the value as a string that should not be auto-escaped.
    """
    return mark_safe(value)
@register.filter(is_safe=True)
def safeseq(value):
    """
    Sequence counterpart of "safe": converts each element to unicode and
    marks it safe individually, returning the results as a list.
    """
    result = []
    for item in value:
        result.append(mark_safe(force_text(item)))
    return result
@register.filter(is_safe=True)
@stringfilter
def removetags(value, tags):
    """Removes a space separated list of [X]HTML tags from the output."""
    # Delegates to django.utils.html.remove_tags.
    return remove_tags(value, tags)
@register.filter(is_safe=True)
@stringfilter
def striptags(value):
    """Strips all [X]HTML tags."""
    # Delegates to django.utils.html.strip_tags.
    return strip_tags(value)
###################
# LISTS #
###################
@register.filter(is_safe=False)
def dictsort(value, arg):
    """
    Sorts a list of dicts by the property named in ``arg``; returns ''
    when the input is unsortable or the property cannot be resolved.
    """
    try:
        key_func = Variable(arg).resolve
        return sorted(value, key=key_func)
    except (TypeError, VariableDoesNotExist):
        return ''
@register.filter(is_safe=False)
def dictsortreversed(value, arg):
    """
    Sorts a list of dicts by the property named in ``arg``, descending;
    returns '' when the input is unsortable or the property cannot be
    resolved.
    """
    try:
        key_func = Variable(arg).resolve
        return sorted(value, key=key_func, reverse=True)
    except (TypeError, VariableDoesNotExist):
        return ''
@register.filter(is_safe=False)
def first(value):
    """Returns the first item in a list, or '' for an empty sequence."""
    try:
        head = value[0]
    except IndexError:
        return ''
    return head
@register.filter(is_safe=True, needs_autoescape=True)
def join(value, arg, autoescape=None):
    """
    Joins a list with a string, like Python's ``str.join(list)``.
    """
    # Materialize as a list (not a lazy map()) so the fallback
    # ``return value`` below doesn't hand back an exhausted iterator
    # on Python 3.
    value = [force_text(v) for v in value]
    if autoescape:
        value = [conditional_escape(v) for v in value]
    try:
        data = conditional_escape(arg).join(value)
    except AttributeError:  # fail silently but nicely
        return value
    return mark_safe(data)
@register.filter(is_safe=True)
def last(value):
    """Returns the last item in a list, or '' for an empty sequence."""
    try:
        tail = value[-1]
    except IndexError:
        return ''
    return tail
@register.filter(is_safe=False)
def length(value):
    """Returns the length of the value, or 0 when it has no length."""
    try:
        result = len(value)
    except (ValueError, TypeError):
        return 0
    return result
@register.filter(is_safe=False)
def length_is(value, arg):
    """Returns whether the value's length equals the argument ('' on error)."""
    try:
        expected = int(arg)
        return len(value) == expected
    except (ValueError, TypeError):
        return ''
@register.filter(is_safe=True)
def random(value):
    """Returns a random item from the list."""
    # random imported as random_module to avoid clashing with this filter name.
    return random_module.choice(value)
@register.filter("slice", is_safe=True)
def slice_filter(value, arg):
"""
Returns a slice of the list.
Uses the same syntax as Python's list slicing; see
http://www.diveintopython3.net/native-datatypes.html#slicinglists
for an introduction.
"""
try:
bits = []
for x in arg.split(':'):
if len(x) == 0:
bits.append(None)
else:
bits.append(int(x))
return value[slice(*bits)]
except (ValueError, TypeError):
return value # Fail silently.
@register.filter(is_safe=True, needs_autoescape=True)
def unordered_list(value, autoescape=None):
    """
    Recursively takes a self-nested list and returns an HTML unordered list --
    WITHOUT opening and closing <ul> tags.

    The list is assumed to be in the proper format. For example, if ``var``
    contains: ``['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]``,
    then ``{{ var|unordered_list }}`` would return::

        <li>States
        <ul>
                <li>Kansas
                <ul>
                        <li>Lawrence</li>
                        <li>Topeka</li>
                </ul>
                </li>
                <li>Illinois</li>
        </ul>
        </li>
    """
    if autoescape:
        escaper = conditional_escape
    else:
        escaper = lambda x: x

    def convert_old_style_list(list_):
        """
        Converts old style lists to the new easier to understand format.

        The old list format looked like:
            ['Item 1', [['Item 1.1', []], ['Item 1.2', []]]

        And it is converted to:
            ['Item 1', ['Item 1.1', 'Item 1.2']]
        """
        if not isinstance(list_, (tuple, list)) or len(list_) != 2:
            return list_, False
        first_item, second_item = list_
        if second_item == []:
            return [first_item], True
        try:
            # see if second item is iterable
            iter(second_item)
        except TypeError:
            return list_, False
        old_style_list = True
        new_second_item = []
        for sublist in second_item:
            item, old_style_list = convert_old_style_list(sublist)
            if not old_style_list:
                break
            new_second_item.extend(item)
        if old_style_list:
            second_item = new_second_item
        return [first_item, second_item], old_style_list

    def walk_items(item_list):
        # Yields (item, children) pairs: a non-string iterable that directly
        # follows an item is treated as that item's nested sublist.
        item_iterator = iter(item_list)
        for item in item_iterator:
            try:
                next_item = next(item_iterator)
            except StopIteration:
                next_item = None
            if not isinstance(next_item, six.string_types):
                try:
                    iter(next_item)
                except TypeError:
                    pass
                else:
                    yield item, next_item
                    continue
            yield item, None
            if next_item:
                yield next_item, None

    def list_formatter(item_list, tabs=1):
        # Renders one nesting level, indenting with one tab per level.
        indent = '\t' * tabs
        output = []
        for item, children in walk_items(item_list):
            sublist = ''
            if children:
                sublist = '\n%s<ul>\n%s\n%s</ul>\n%s' % (
                    indent, list_formatter(children, tabs + 1), indent, indent)
            output.append('%s<li>%s%s</li>' % (
                indent, escaper(force_text(item)), sublist))
        return '\n'.join(output)

    value, converted = convert_old_style_list(value)
    if converted:
        # Fixed doubled word ("the the") in the deprecation message.
        warnings.warn(
            "The old style syntax in `unordered_list` is deprecated and will "
            "be removed in Django 2.0. Use the new format instead.",
            RemovedInDjango20Warning)
    return mark_safe(list_formatter(value))
###################
# INTEGERS #
###################
@register.filter(is_safe=False)
def add(value, arg):
    """Adds the arg to the value, trying integer then native addition."""
    try:
        return int(value) + int(arg)
    except (ValueError, TypeError):
        pass
    # Not both integer-like; fall back to the operands' own + semantics
    # (string/list concatenation etc.), failing silently to ''.
    try:
        return value + arg
    except Exception:
        return ''
@register.filter(is_safe=False)
def get_digit(value, arg):
    """
    Given a whole number, returns the requested digit of it, where 1 is the
    right-most digit, 2 is the second-right-most digit, etc. Returns the
    original value for invalid input (if input or argument is not an integer,
    or if argument is less than 1). Otherwise, output is always an integer.
    """
    try:
        arg = int(arg)
        value = int(value)
    except ValueError:
        return value  # Fail silently for an invalid argument
    if arg < 1:
        return value
    digits = str(value)
    # Position beyond the number's width yields 0.
    if arg > len(digits):
        return 0
    return int(digits[-arg])
###################
# DATES #
###################
@register.filter(expects_localtime=True, is_safe=False)
def date(value, arg=None):
    """Formats a date according to the given format."""
    if value in (None, ''):
        return ''
    if arg is None:
        arg = settings.DATE_FORMAT
    try:
        return formats.date_format(value, arg)
    except AttributeError:
        # Localized formatting failed; fall back to the low-level dateformat
        # helper, and give up silently if that fails too.
        try:
            return format(value, arg)
        except AttributeError:
            return ''
@register.filter(expects_localtime=True, is_safe=False)
def time(value, arg=None):
    """Formats a time according to the given format."""
    if value in (None, ''):
        return ''
    if arg is None:
        arg = settings.TIME_FORMAT
    try:
        return formats.time_format(value, arg)
    except AttributeError:
        # Localized formatting failed; fall back to the low-level dateformat
        # helper, and give up silently if that fails too.
        try:
            return time_format(value, arg)
        except AttributeError:
            return ''
@register.filter("timesince", is_safe=False)
def timesince_filter(value, arg=None):
"""Formats a date as the time since that date (i.e. "4 days, 6 hours")."""
if not value:
return ''
try:
if arg:
return timesince(value, arg)
return timesince(value)
except (ValueError, TypeError):
return ''
@register.filter("timeuntil", is_safe=False)
def timeuntil_filter(value, arg=None):
"""Formats a date as the time until that date (i.e. "4 days, 6 hours")."""
if not value:
return ''
try:
return timeuntil(value, arg)
except (ValueError, TypeError):
return ''
###################
# LOGIC #
###################
@register.filter(is_safe=False)
def default(value, arg):
    """If value is unavailable, use given default."""
    # Any falsy value (empty string, 0, None, ...) triggers the default.
    if value:
        return value
    return arg
@register.filter(is_safe=False)
def default_if_none(value, arg):
    """If value is None, use given default."""
    # Unlike `default`, only None (not every falsy value) is replaced.
    return arg if value is None else value
@register.filter(is_safe=False)
def divisibleby(value, arg):
    """Returns True if the value is divisible by the argument."""
    return not int(value) % int(arg)
@register.filter(is_safe=False)
def yesno(value, arg=None):
    """
    Given a string mapping values for true, false and (optionally) None,
    returns one of those strings according to the value:

    ==========  ======================  ==================================
    Value       Argument                Outputs
    ==========  ======================  ==================================
    ``True``    ``"yeah,no,maybe"``     ``yeah``
    ``False``   ``"yeah,no,maybe"``     ``no``
    ``None``    ``"yeah,no,maybe"``     ``maybe``
    ``None``    ``"yeah,no"``           ``"no"`` (converts None to False
                                        if no mapping for None is given)
    ==========  ======================  ==================================
    """
    if arg is None:
        arg = ugettext('yes,no,maybe')
    bits = arg.split(',')
    if len(bits) < 2:
        # Not enough mappings to be meaningful; pass the value through.
        return value
    yes, no = bits[0], bits[1]
    # A "maybe" text is honored only with exactly three mappings; otherwise
    # None falls back to the "no" text (matching the original unpacking).
    maybe = bits[2] if len(bits) == 3 else no
    if value is None:
        return maybe
    return yes if value else no
###################
# MISC #
###################
@register.filter(is_safe=True)
def filesizeformat(bytes):
    """
    Formats the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB,
    102 bytes, etc).
    """
    try:
        bytes = float(bytes)
    except (TypeError, ValueError, UnicodeDecodeError):
        # Non-numeric input formats as "0 bytes".
        value = ungettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0}
        return avoid_wrapping(value)

    def number_format(value):
        # One decimal place, locale-aware.
        return formats.number_format(round(value, 1), 1)

    KB, MB, GB, TB, PB = (1 << exp for exp in (10, 20, 30, 40, 50))
    if bytes < KB:
        value = ungettext("%(size)d byte", "%(size)d bytes", bytes) % {'size': bytes}
    elif bytes < MB:
        value = ugettext("%s KB") % number_format(bytes / KB)
    elif bytes < GB:
        value = ugettext("%s MB") % number_format(bytes / MB)
    elif bytes < TB:
        value = ugettext("%s GB") % number_format(bytes / GB)
    elif bytes < PB:
        value = ugettext("%s TB") % number_format(bytes / TB)
    else:
        value = ugettext("%s PB") % number_format(bytes / PB)
    return avoid_wrapping(value)
@register.filter(is_safe=False)
def pluralize(value, arg='s'):
    """
    Returns a plural suffix if the value is not 1. By default, 's' is used as
    the suffix:

    * If value is 0, vote{{ value|pluralize }} displays "0 votes".
    * If value is 1, vote{{ value|pluralize }} displays "1 vote".
    * If value is 2, vote{{ value|pluralize }} displays "2 votes".

    If an argument is provided, that string is used instead:

    * If value is 0, class{{ value|pluralize:"es" }} displays "0 classes".
    * If value is 1, class{{ value|pluralize:"es" }} displays "1 class".
    * If value is 2, class{{ value|pluralize:"es" }} displays "2 classes".

    If the provided argument contains a comma, the text before the comma is
    used for the singular case and the text after the comma is used for the
    plural case:

    * If value is 0, cand{{ value|pluralize:"y,ies" }} displays "0 candies".
    * If value is 1, cand{{ value|pluralize:"y,ies" }} displays "1 candy".
    * If value is 2, cand{{ value|pluralize:"y,ies" }} displays "2 candies".
    """
    if ',' not in arg:
        # A bare suffix means: empty singular suffix, given plural suffix.
        arg = ',' + arg
    bits = arg.split(',')
    if len(bits) > 2:
        return ''
    singular_suffix, plural_suffix = bits[:2]
    try:
        return singular_suffix if float(value) == 1 else plural_suffix
    except ValueError:
        # Not a number-like string: use the singular form.
        return singular_suffix
    except TypeError:
        # Not a string or a number; maybe it's a sized collection.
        try:
            return singular_suffix if len(value) == 1 else plural_suffix
        except TypeError:
            # len() of unsized object.
            return singular_suffix
@register.filter("phone2numeric", is_safe=True)
def phone2numeric_filter(value):
"""Takes a phone number and converts it in to its numerical equivalent."""
return phone2numeric(value)
@register.filter(is_safe=True)
def pprint(value):
    """A wrapper around pprint.pformat -- for debugging, really."""
    try:
        return pformat(value)
    except Exception as exc:
        # Objects whose repr() blows up must not break template rendering.
        return "Error in formatting: %s: %s" % (
            exc.__class__.__name__, force_text(exc, errors="replace"))
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/pprint.py | 71 | 11777 | # Author: Fred L. Drake, Jr.
# fdrake@acm.org
#
# This is a simple little module I wrote to make life easier. I didn't
# see anything quite like it in the library, though I may have overlooked
# something. I wrote this when I was trying to read some heavily nested
# tuples with fairly non-descriptive content. This is modeled very much
# after Lisp/Scheme - style pretty-printing of lists. If you find it
# useful, thank small children who sleep at night.
"""Support to pretty-print lists, tuples, & dictionaries recursively.
Very simple, but useful, especially in debugging data structures.
Classes
-------
PrettyPrinter()
Handle pretty-printing operations onto a stream using a configured
set of formatting parameters.
Functions
---------
pformat()
Format a Python object into a pretty-printed representation.
pprint()
Pretty-print a Python object to a stream [default is sys.stdout].
saferepr()
Generate a 'standard' repr()-like value, but protect against recursive
data structures.
"""
import sys as _sys
import warnings
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
__all__ = ["pprint","pformat","isreadable","isrecursive","saferepr",
"PrettyPrinter"]
# cache these for faster access:
_commajoin = ", ".join
_id = id
_len = len
_type = type
def pprint(object, stream=None, indent=1, width=80, depth=None):
    """Pretty-print a Python object to a stream [default is sys.stdout]."""
    PrettyPrinter(stream=stream, indent=indent, width=width,
                  depth=depth).pprint(object)
def pformat(object, indent=1, width=80, depth=None):
    """Format a Python object into a pretty-printed representation."""
    printer = PrettyPrinter(indent=indent, width=width, depth=depth)
    return printer.pformat(object)
def saferepr(object):
    """Version of repr() which can handle recursive data structures."""
    # _safe_repr returns (repr_string, isreadable, isrecursive).
    rep, _readable, _recursive = _safe_repr(object, {}, None, 0)
    return rep
def isreadable(object):
    """Determine if saferepr(object) is readable by eval()."""
    _rep, readable, _recursive = _safe_repr(object, {}, None, 0)
    return readable
def isrecursive(object):
    """Determine if object requires a recursive representation."""
    _rep, _readable, recursive = _safe_repr(object, {}, None, 0)
    return recursive
def _sorted(iterable):
    """Sort *iterable*, suppressing the "comparing unequal types" warning.

    Under ``python -3`` (Python 2 with py3k warnings enabled), sorting
    heterogeneous keys emits a DeprecationWarning; filter it out so the
    pretty-printer stays quiet.
    """
    with warnings.catch_warnings():
        if _sys.py3kwarning:
            warnings.filterwarnings("ignore", "comparing unequal types "
                                    "not supported", DeprecationWarning)
        return sorted(iterable)
class PrettyPrinter:
    # Core formatter.  _format() recursively writes a representation of an
    # object to a stream, tracking in-progress containers (for cycle
    # detection) and the horizontal space available on the current line.

    def __init__(self, indent=1, width=80, depth=None, stream=None):
        """Handle pretty printing operations onto a stream using a set of
        configured parameters.

        indent
            Number of spaces to indent for each level of nesting.
        width
            Attempted maximum number of columns in the output.
        depth
            The maximum depth to print out nested structures.
        stream
            The desired output stream.  If omitted (or false), the standard
            output stream available at construction will be used.
        """
        indent = int(indent)
        width = int(width)
        assert indent >= 0, "indent must be >= 0"
        assert depth is None or depth > 0, "depth must be > 0"
        assert width, "width must be != 0"
        self._depth = depth
        self._indent_per_level = indent
        self._width = width
        if stream is not None:
            self._stream = stream
        else:
            self._stream = _sys.stdout

    def pprint(self, object):
        # Write the formatted object plus a trailing newline.
        self._format(object, self._stream, 0, 0, {}, 0)
        self._stream.write("\n")

    def pformat(self, object):
        # Like pprint() but collect the output into a string.
        sio = _StringIO()
        self._format(object, sio, 0, 0, {}, 0)
        return sio.getvalue()

    def isrecursive(self, object):
        # Third element of the (repr, readable, recursive) triple.
        return self.format(object, {}, 0, 0)[2]

    def isreadable(self, object):
        s, readable, recursive = self.format(object, {}, 0, 0)
        return readable and not recursive

    def _format(self, object, stream, indent, allowance, context, level):
        # `context` maps id() of containers currently being formatted on
        # this call stack; `allowance` is the space to reserve on the
        # current line for trailing delimiters of enclosing containers.
        level = level + 1
        objid = _id(object)
        if objid in context:
            # This object is already being formatted higher up: a cycle.
            stream.write(_recursion(object))
            self._recursive = True
            self._readable = False
            return
        rep = self._repr(object, context, level - 1)
        typ = _type(object)
        # Split across lines only when the one-line repr would not fit.
        sepLines = _len(rep) > (self._width - 1 - indent - allowance)
        write = stream.write

        if self._depth and level > self._depth:
            # Depth limit reached: emit the (elided) repr as-is.
            write(rep)
            return

        r = getattr(typ, "__repr__", None)
        if issubclass(typ, dict) and r is dict.__repr__:
            # Plain dict (subclasses overriding __repr__ fall through to
            # the generic case): format key/value pairs sorted by key.
            write('{')
            if self._indent_per_level > 1:
                write((self._indent_per_level - 1) * ' ')
            length = _len(object)
            if length:
                context[objid] = 1
                indent = indent + self._indent_per_level
                items = _sorted(object.items())
                key, ent = items[0]
                rep = self._repr(key, context, level)
                write(rep)
                write(': ')
                # "+ 2" accounts for the ": " separator after the key.
                self._format(ent, stream, indent + _len(rep) + 2,
                             allowance + 1, context, level)
                if length > 1:
                    for key, ent in items[1:]:
                        rep = self._repr(key, context, level)
                        if sepLines:
                            write(',\n%s%s: ' % (' '*indent, rep))
                        else:
                            write(', %s: ' % rep)
                        self._format(ent, stream, indent + _len(rep) + 2,
                                     allowance + 1, context, level)
                indent = indent - self._indent_per_level
                del context[objid]
            write('}')
            return

        if ((issubclass(typ, list) and r is list.__repr__) or
            (issubclass(typ, tuple) and r is tuple.__repr__) or
            (issubclass(typ, set) and r is set.__repr__) or
            (issubclass(typ, frozenset) and r is frozenset.__repr__)
           ):
            length = _len(object)
            if issubclass(typ, list):
                write('[')
                endchar = ']'
            elif issubclass(typ, tuple):
                write('(')
                endchar = ')'
            else:
                # set/frozenset: rendered as "name([...])"; empty ones
                # keep their plain repr.
                if not length:
                    write(rep)
                    return
                write(typ.__name__)
                write('([')
                endchar = '])'
                indent += len(typ.__name__) + 1
                # Sort set elements for deterministic output.
                object = _sorted(object)
            if self._indent_per_level > 1 and sepLines:
                write((self._indent_per_level - 1) * ' ')
            if length:
                context[objid] = 1
                indent = indent + self._indent_per_level
                self._format(object[0], stream, indent, allowance + 1,
                             context, level)
                if length > 1:
                    for ent in object[1:]:
                        if sepLines:
                            write(',\n' + ' '*indent)
                        else:
                            write(', ')
                        self._format(ent, stream, indent,
                                     allowance + 1, context, level)
                indent = indent - self._indent_per_level
                del context[objid]
            if issubclass(typ, tuple) and length == 1:
                # Preserve the single-element tuple's trailing comma.
                write(',')
            write(endchar)
            return

        # Fallback for every other type: use the precomputed repr.
        write(rep)

    def _repr(self, object, context, level):
        # Wrapper around format() that also folds the readability and
        # recursion flags into instance state.
        repr, readable, recursive = self.format(object, context.copy(),
                                                self._depth, level)
        if not readable:
            self._readable = False
        if recursive:
            self._recursive = True
        return repr

    def format(self, object, context, maxlevels, level):
        """Format object for a specific context, returning a string
        and flags indicating whether the representation is 'readable'
        and whether the object represents a recursive construct.
        """
        return _safe_repr(object, context, maxlevels, level)
def _safe_repr(object, context, maxlevels, level):
    """Return triple (repr_string, isreadable, isrecursive)."""
    typ = _type(object)
    if typ is str:
        if 'locale' not in _sys.modules:
            # Without the locale module loaded, repr() of a str is
            # locale-independent, so it can be used directly.
            return repr(object), True, False
        # Otherwise quote by hand, escaping only non-alphabetic characters.
        if "'" in object and '"' not in object:
            closure = '"'
            quotes = {'"': '\\"'}
        else:
            closure = "'"
            quotes = {"'": "\\'"}
        qget = quotes.get
        sio = _StringIO()
        write = sio.write
        for char in object:
            if char.isalpha():
                write(char)
            else:
                # repr(char)[1:-1] strips the surrounding quotes.
                write(qget(char, repr(char)[1:-1]))
        return ("%s%s%s" % (closure, sio.getvalue(), closure)), True, False

    r = getattr(typ, "__repr__", None)
    if issubclass(typ, dict) and r is dict.__repr__:
        if not object:
            return "{}", True, False
        objid = _id(object)
        if maxlevels and level >= maxlevels:
            return "{...}", False, objid in context
        if objid in context:
            # Cycle: this dict is already being formatted up the stack.
            return _recursion(object), False, True
        context[objid] = 1
        readable = True
        recursive = False
        components = []
        append = components.append
        level += 1
        saferepr = _safe_repr
        for k, v in _sorted(object.items()):
            krepr, kreadable, krecur = saferepr(k, context, maxlevels, level)
            vrepr, vreadable, vrecur = saferepr(v, context, maxlevels, level)
            append("%s: %s" % (krepr, vrepr))
            readable = readable and kreadable and vreadable
            if krecur or vrecur:
                recursive = True
        del context[objid]
        return "{%s}" % _commajoin(components), readable, recursive

    if (issubclass(typ, list) and r is list.__repr__) or \
       (issubclass(typ, tuple) and r is tuple.__repr__):
        if issubclass(typ, list):
            if not object:
                return "[]", True, False
            format = "[%s]"
        elif _len(object) == 1:
            # Single-element tuple needs its trailing comma.
            format = "(%s,)"
        else:
            if not object:
                return "()", True, False
            format = "(%s)"
        objid = _id(object)
        if maxlevels and level >= maxlevels:
            return format % "...", False, objid in context
        if objid in context:
            return _recursion(object), False, True
        context[objid] = 1
        readable = True
        recursive = False
        components = []
        append = components.append
        level += 1
        for o in object:
            orepr, oreadable, orecur = _safe_repr(o, context, maxlevels, level)
            append(orepr)
            if not oreadable:
                readable = False
            if orecur:
                recursive = True
        del context[objid]
        return format % _commajoin(components), readable, recursive

    # Every other type: plain repr(); "<...>"-style reprs are not readable.
    rep = repr(object)
    return rep, (rep and not rep.startswith('<')), False
def _recursion(object):
    """Return the marker text substituted for objects that contain themselves."""
    name = _type(object).__name__
    return "<Recursion on %s with id=%s>" % (name, _id(object))
def _perfcheck(object=None):
    """Crude benchmark comparing _safe_repr() with PrettyPrinter.pformat()."""
    import time
    if object is None:
        object = [("string", (1, 2), [3, 4], {5: 6, 7: 8})] * 100000
    p = PrettyPrinter()
    t1 = time.time()
    _safe_repr(object, {}, None, 0)
    t2 = time.time()
    p.pformat(object)
    t3 = time.time()
    # NOTE: Python 2 print statements -- this module predates Python 3.
    print "_safe_repr:", t2 - t1
    print "pformat:", t3 - t2

if __name__ == "__main__":
    _perfcheck()
| gpl-3.0 |
davidvon/pipa-pay-server | site-packages/pip/vcs/git.py | 473 | 7898 | import tempfile
import re
import os.path
from pip.util import call_subprocess
from pip.util import display_path, rmtree
from pip.vcs import vcs, VersionControl
from pip.log import logger
from pip.backwardcompat import url2pathname, urlparse
urlsplit = urlparse.urlsplit
urlunsplit = urlparse.urlunsplit
class Git(VersionControl):
    """pip VCS backend for Git repositories (git+... requirement URLs)."""
    name = 'git'
    dirname = '.git'
    repo_name = 'clone'
    schemes = ('git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file')
    bundle_file = 'git-clone.txt'
    guide = ('# This was a Git repo; to make it a repo again run:\n'
             'git init\ngit remote add origin %(url)s -f\ngit checkout %(rev)s\n')

    def __init__(self, url=None, *args, **kwargs):
        # Works around an apparent Git bug
        # (see http://article.gmane.org/gmane.comp.version-control.git/146500):
        # rewrite git+file URLs so the path is in forward-slash form and the
        # scheme prefix (e.g. "git+") is re-attached outside the split URL.
        if url:
            scheme, netloc, path, query, fragment = urlsplit(url)
            if scheme.endswith('file'):
                initial_slashes = path[:-len(path.lstrip('/'))]
                newpath = initial_slashes + url2pathname(path).replace('\\', '/').lstrip('/')
                url = urlunsplit((scheme, netloc, newpath, query, fragment))
                after_plus = scheme.find('+') + 1
                url = scheme[:after_plus] + urlunsplit((scheme[after_plus:], netloc, newpath, query, fragment))
        super(Git, self).__init__(url, *args, **kwargs)

    def parse_vcs_bundle_file(self, content):
        """Extract (url, rev) from a git-clone.txt bundle file; both must be
        present or (None, None) is returned."""
        url = rev = None
        for line in content.splitlines():
            if not line.strip() or line.strip().startswith('#'):
                continue
            url_match = re.search(r'git\s*remote\s*add\s*origin(.*)\s*-f', line)
            if url_match:
                url = url_match.group(1).strip()
            rev_match = re.search(r'^git\s*checkout\s*-q\s*(.*)\s*', line)
            if rev_match:
                rev = rev_match.group(1).strip()
            if url and rev:
                return url, rev
        return None, None

    def export(self, location):
        """Export the Git repository at the url to the destination location"""
        temp_dir = tempfile.mkdtemp('-export', 'pip-')
        self.unpack(temp_dir)
        try:
            # checkout-index requires a trailing slash on the prefix.
            if not location.endswith('/'):
                location = location + '/'
            call_subprocess(
                [self.cmd, 'checkout-index', '-a', '-f', '--prefix', location],
                filter_stdout=self._filter, show_stdout=False, cwd=temp_dir)
        finally:
            rmtree(temp_dir)

    def check_rev_options(self, rev, dest, rev_options):
        """Check the revision options before checkout to compensate that tags
        and branches may need origin/ as a prefix.
        Returns the SHA1 of the branch or tag if found.
        """
        revisions = self.get_refs(dest)
        origin_rev = 'origin/%s' % rev
        if origin_rev in revisions:
            # remote branch
            return [revisions[origin_rev]]
        elif rev in revisions:
            # a local tag or branch name
            return [revisions[rev]]
        else:
            # Unknown name: assume it is a commit hash and pass it through.
            logger.warn("Could not find a tag or branch '%s', assuming commit." % rev)
            return rev_options

    def switch(self, dest, url, rev_options):
        # Re-point the origin remote at the new URL, then check out the rev.
        call_subprocess(
            [self.cmd, 'config', 'remote.origin.url', url], cwd=dest)
        call_subprocess(
            [self.cmd, 'checkout', '-q'] + rev_options, cwd=dest)

        self.update_submodules(dest)

    def update(self, dest, rev_options):
        # First fetch changes from the default remote
        call_subprocess([self.cmd, 'fetch', '-q'], cwd=dest)
        # Then reset to wanted revision (maybe even origin/master)
        if rev_options:
            rev_options = self.check_rev_options(rev_options[0], dest, rev_options)
        call_subprocess([self.cmd, 'reset', '--hard', '-q'] + rev_options, cwd=dest)
        # Update submodules
        self.update_submodules(dest)

    def obtain(self, dest):
        """Clone the repository into *dest*, checking out the requested
        revision if one was given in the URL fragment."""
        url, rev = self.get_url_rev()
        if rev:
            rev_options = [rev]
            rev_display = ' (to %s)' % rev
        else:
            rev_options = ['origin/master']
            rev_display = ''
        if self.check_destination(dest, url, rev_options, rev_display):
            logger.notify('Cloning %s%s to %s' % (url, rev_display, display_path(dest)))
            call_subprocess([self.cmd, 'clone', '-q', url, dest])
            #: repo may contain submodules
            self.update_submodules(dest)
            if rev:
                rev_options = self.check_rev_options(rev, dest, rev_options)
                # Only do a checkout if rev_options differs from HEAD
                if not self.get_revision(dest).startswith(rev_options[0]):
                    call_subprocess([self.cmd, 'checkout', '-q'] + rev_options, cwd=dest)

    def get_url(self, location):
        # URL of the "origin" remote of the checkout at *location*.
        url = call_subprocess(
            [self.cmd, 'config', 'remote.origin.url'],
            show_stdout=False, cwd=location)
        return url.strip()

    def get_revision(self, location):
        # SHA1 of the current HEAD.
        current_rev = call_subprocess(
            [self.cmd, 'rev-parse', 'HEAD'], show_stdout=False, cwd=location)
        return current_rev.strip()

    def get_refs(self, location):
        """Return map of named refs (branches or tags) to commit hashes."""
        output = call_subprocess([self.cmd, 'show-ref'],
                                 show_stdout=False, cwd=location)
        rv = {}
        for line in output.strip().splitlines():
            commit, ref = line.split(' ', 1)
            ref = ref.strip()
            ref_name = None
            # Strip the refs/... namespace prefix to get the bare name.
            if ref.startswith('refs/remotes/'):
                ref_name = ref[len('refs/remotes/'):]
            elif ref.startswith('refs/heads/'):
                ref_name = ref[len('refs/heads/'):]
            elif ref.startswith('refs/tags/'):
                ref_name = ref[len('refs/tags/'):]
            if ref_name is not None:
                rv[ref_name] = commit.strip()
        return rv

    def get_src_requirement(self, dist, location, find_tags):
        """Build a "git+<url>@<rev>#egg=<name>" editable requirement string
        for the checkout at *location*."""
        repo = self.get_url(location)
        if not repo.lower().startswith('git:'):
            repo = 'git+' + repo
        egg_project_name = dist.egg_name().split('-', 1)[0]
        if not repo:
            return None
        current_rev = self.get_revision(location)
        refs = self.get_refs(location)
        # refs maps names to commit hashes; we need the inverse
        # if multiple names map to a single commit, this arbitrarily picks one
        names_by_commit = dict((commit, ref) for ref, commit in refs.items())
        if current_rev in names_by_commit:
            # It's a tag
            full_egg_name = '%s-%s' % (egg_project_name, names_by_commit[current_rev])
        else:
            full_egg_name = '%s-dev' % egg_project_name
        return '%s@%s#egg=%s' % (repo, current_rev, full_egg_name)

    def get_url_rev(self):
        """
        Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
        That's required because although they use SSH they sometimes doesn't
        work with a ssh:// scheme (e.g. Github). But we need a scheme for
        parsing. Hence we remove it again afterwards and return it as a stub.
        """
        if not '://' in self.url:
            assert not 'file:' in self.url
            self.url = self.url.replace('git+', 'git+ssh://')
            url, rev = super(Git, self).get_url_rev()
            url = url.replace('ssh://', '')
        else:
            url, rev = super(Git, self).get_url_rev()

        return url, rev

    def update_submodules(self, location):
        # No-op for repositories without a .gitmodules file.
        if not os.path.exists(os.path.join(location, '.gitmodules')):
            return
        call_subprocess([self.cmd, 'submodule', 'update', '--init', '--recursive', '-q'],
                        cwd=location)
vcs.register(Git)
| apache-2.0 |
scs/uclinux | user/python/python-2.4.4/Lib/imaplib.py | 5 | 45542 | """IMAP4 client.
Based on RFC 2060.
Public class: IMAP4
Public variable: Debug
Public functions: Internaldate2tuple
Int2AP
ParseFlags
Time2Internaldate
"""
# Author: Piers Lauder <piers@cs.su.oz.au> December 1997.
#
# Authentication code contributed by Donn Cave <donn@u.washington.edu> June 1998.
# String method conversion by ESR, February 2001.
# GET/SETACL contributed by Anthony Baxter <anthony@interlink.com.au> April 2001.
# IMAP4_SSL contributed by Tino Lange <Tino.Lange@isg.de> March 2002.
# GET/SETQUOTA contributed by Andreas Zeidler <az@kreativkombinat.de> June 2002.
# PROXYAUTH contributed by Rick Holbert <holbert.13@osu.edu> November 2002.
__version__ = "2.55"
import binascii, os, random, re, socket, sys, time
__all__ = ["IMAP4", "IMAP4_SSL", "IMAP4_stream", "Internaldate2tuple",
"Int2AP", "ParseFlags", "Time2Internaldate"]
#       Globals

CRLF = '\r\n'           # IMAP4 line terminator
Debug = 0               # Module-wide default debugging level
IMAP4_PORT = 143        # Standard IMAP4 port
IMAP4_SSL_PORT = 993    # Standard IMAP4-over-SSL port
AllowedVersions = ('IMAP4REV1', 'IMAP4')        # Most recent first

#       Commands
# Maps each IMAP4 command name to the connection states in which it may
# legally be issued (see RFC 2060 section 6).

Commands = {
        # name            valid states
        'APPEND':       ('AUTH', 'SELECTED'),
        'AUTHENTICATE': ('NONAUTH',),
        'CAPABILITY':   ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
        'CHECK':        ('SELECTED',),
        'CLOSE':        ('SELECTED',),
        'COPY':         ('SELECTED',),
        'CREATE':       ('AUTH', 'SELECTED'),
        'DELETE':       ('AUTH', 'SELECTED'),
        'DELETEACL':    ('AUTH', 'SELECTED'),
        'EXAMINE':      ('AUTH', 'SELECTED'),
        'EXPUNGE':      ('SELECTED',),
        'FETCH':        ('SELECTED',),
        'GETACL':       ('AUTH', 'SELECTED'),
        'GETQUOTA':     ('AUTH', 'SELECTED'),
        'GETQUOTAROOT': ('AUTH', 'SELECTED'),
        'MYRIGHTS':     ('AUTH', 'SELECTED'),
        'LIST':         ('AUTH', 'SELECTED'),
        'LOGIN':        ('NONAUTH',),
        'LOGOUT':       ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
        'LSUB':         ('AUTH', 'SELECTED'),
        'NAMESPACE':    ('AUTH', 'SELECTED'),
        'NOOP':         ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
        'PARTIAL':      ('SELECTED',),                                  # NB: obsolete
        'PROXYAUTH':    ('AUTH',),
        'RENAME':       ('AUTH', 'SELECTED'),
        'SEARCH':       ('SELECTED',),
        'SELECT':       ('AUTH', 'SELECTED'),
        'SETACL':       ('AUTH', 'SELECTED'),
        'SETQUOTA':     ('AUTH', 'SELECTED'),
        'SORT':         ('SELECTED',),
        'STATUS':       ('AUTH', 'SELECTED'),
        'STORE':        ('SELECTED',),
        'SUBSCRIBE':    ('AUTH', 'SELECTED'),
        'THREAD':       ('SELECTED',),
        'UID':          ('SELECTED',),
        'UNSUBSCRIBE':  ('AUTH', 'SELECTED'),
        }

#       Patterns to match server responses

Continuation = re.compile(r'\+( (?P<data>.*))?')
Flags = re.compile(r'.*FLAGS \((?P<flags>[^\)]*)\)')
# INTERNALDATE "dd-Mon-yyyy hh:mm:ss +zzzz" (RFC 2060 date-time syntax).
InternalDate = re.compile(r'.*INTERNALDATE "'
        r'(?P<day>[ 0123][0-9])-(?P<mon>[A-Z][a-z][a-z])-(?P<year>[0-9][0-9][0-9][0-9])'
        r' (?P<hour>[0-9][0-9]):(?P<min>[0-9][0-9]):(?P<sec>[0-9][0-9])'
        r' (?P<zonen>[-+])(?P<zoneh>[0-9][0-9])(?P<zonem>[0-9][0-9])'
        r'"')
# Literal announcement "{<byte count>}" at the end of a line.
Literal = re.compile(r'.*{(?P<size>\d+)}$')
# Any line-ending convention, normalized to CRLF when sending.
MapCRLF = re.compile(r'\r\n|\r|\n')
Response_code = re.compile(r'\[(?P<type>[A-Z-]+)( (?P<data>[^\]]*))?\]')
Untagged_response = re.compile(r'\* (?P<type>[A-Z-]+)( (?P<data>.*))?')
Untagged_status = re.compile(r'\* (?P<data>\d+) (?P<type>[A-Z-]+)( (?P<data2>.*))?')
class IMAP4:
"""IMAP4 client class.
Instantiate with: IMAP4([host[, port]])
host - host's name (default: localhost);
port - port number (default: standard IMAP4 port).
All IMAP4rev1 commands are supported by methods of the same
name (in lower-case).
All arguments to commands are converted to strings, except for
AUTHENTICATE, and the last argument to APPEND which is passed as
an IMAP4 literal. If necessary (the string contains any
non-printing characters or white-space and isn't enclosed with
either parentheses or double quotes) each string is quoted.
However, the 'password' argument to the LOGIN command is always
quoted. If you want to avoid having an argument string quoted
(eg: the 'flags' argument to STORE) then enclose the string in
parentheses (eg: "(\Deleted)").
Each command returns a tuple: (type, [data, ...]) where 'type'
is usually 'OK' or 'NO', and 'data' is either the text from the
tagged response, or untagged results from command. Each 'data'
is either a string, or a tuple. If a tuple, then the first part
is the header of the response, and the second part contains
the data (ie: 'literal' value).
Errors raise the exception class <instance>.error("<reason>").
IMAP4 server errors raise <instance>.abort("<reason>"),
which is a sub-class of 'error'. Mailbox status changes
from READ-WRITE to READ-ONLY raise the exception class
<instance>.readonly("<reason>"), which is a sub-class of 'abort'.
"error" exceptions imply a program error.
"abort" exceptions imply the connection should be reset, and
the command re-tried.
"readonly" exceptions imply the command should be re-tried.
Note: to use this module, you must read the RFCs pertaining
to the IMAP4 protocol, as the semantics of the arguments to
each IMAP4 command are left to the invoker, not to mention
the results.
"""
class error(Exception): pass # Logical errors - debug required
class abort(error): pass # Service errors - close and retry
class readonly(abort): pass # Mailbox status changed to READ-ONLY
mustquote = re.compile(r"[^\w!#$%&'*+,.:;<=>?^`|~-]")
    def __init__(self, host = '', port = IMAP4_PORT):
        """Connect to *host*:*port*, read the server greeting and perform
        the CAPABILITY exchange, leaving the instance in the NONAUTH state
        (or AUTH, if the server greeted with PREAUTH).

        Raises self.error if the greeting is unrecognized, the server sends
        no CAPABILITY response, or no supported protocol version is offered.
        """
        self.debug = Debug
        self.state = 'LOGOUT'
        self.literal = None             # A literal argument to a command
        self.tagged_commands = {}       # Tagged commands awaiting response
        self.untagged_responses = {}    # {typ: [data, ...], ...}
        self.continuation_response = '' # Last continuation response
        self.is_readonly = None         # READ-ONLY desired state
        self.tagnum = 0

        # Open socket to server.
        self.open(host, port)

        # Create unique tag for this session,
        # and compile tagged response matcher.
        self.tagpre = Int2AP(random.randint(0, 31999))
        self.tagre = re.compile(r'(?P<tag>'
                        + self.tagpre
                        + r'\d+) (?P<type>[A-Z]+) (?P<data>.*)')

        # Get server welcome message,
        # request and store CAPABILITY response.
        if __debug__:
            self._cmd_log_len = 10
            self._cmd_log_idx = 0
            self._cmd_log = {}           # Last `_cmd_log_len' interactions
            if self.debug >= 1:
                self._mesg('imaplib version %s' % __version__)
                self._mesg('new IMAP4 connection, tag=%s' % self.tagpre)

        self.welcome = self._get_response()
        if 'PREAUTH' in self.untagged_responses:
            self.state = 'AUTH'
        elif 'OK' in self.untagged_responses:
            self.state = 'NONAUTH'
        else:
            raise self.error(self.welcome)

        cap = 'CAPABILITY'
        self._simple_command(cap)
        if not cap in self.untagged_responses:
            raise self.error('no CAPABILITY response from server')
        self.capabilities = tuple(self.untagged_responses[cap][-1].upper().split())

        if __debug__:
            if self.debug >= 3:
                self._mesg('CAPABILITIES: %r' % (self.capabilities,))

        # Pick the most recent protocol version both sides support.
        for version in AllowedVersions:
            if not version in self.capabilities:
                continue
            self.PROTOCOL_VERSION = version
            return

        raise self.error('server not IMAP4 compliant')
def __getattr__(self, attr):
# Allow UPPERCASE variants of IMAP4 command methods.
if attr in Commands:
return getattr(self, attr.lower())
raise AttributeError("Unknown IMAP4 command: '%s'" % attr)
# Overridable methods
    def open(self, host = '', port = IMAP4_PORT):
        """Setup connection to remote server on "host:port"
            (default: localhost:standard IMAP4 port).
        This connection will be used by the routines:
            read, readline, send, shutdown.
        """
        self.host = host
        self.port = port
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((host, port))
        # Buffered binary file object used by read()/readline().
        self.file = self.sock.makefile('rb')
    def read(self, size):
        """Read 'size' bytes from remote."""
        # Blocking read from the buffered file wrapping the socket.
        return self.file.read(size)
    def readline(self):
        """Read line from remote."""
        # One CRLF-terminated protocol line (terminator included).
        return self.file.readline()
    def send(self, data):
        """Send data to remote."""
        # sendall() loops until every byte is transmitted.
        self.sock.sendall(data)
def shutdown(self):
"""Close I/O established in "open"."""
self.file.close()
self.sock.close()
    def socket(self):
        """Return socket instance used to connect to IMAP4 server.

        socket = <instance>.socket()
        """
        return self.sock
# Utility methods
    def recent(self):
        """Return most recent 'RECENT' responses if any exist,
        else prompt server for an update using the 'NOOP' command.

        (typ, [data]) = <instance>.recent()

        'data' is None if no new messages,
        else list of RECENT responses, most recent last.
        """
        name = 'RECENT'
        typ, dat = self._untagged_response('OK', [None], name)
        if dat[-1]:
            # Stored RECENT responses exist; return them directly.
            return typ, dat
        typ, dat = self.noop()  # Prod server for response
        return self._untagged_response(typ, dat, name)
    def response(self, code):
        """Return data for response 'code' if received, or None.

        Old value for response 'code' is cleared.

        (code, [data]) = <instance>.response(code)
        """
        # Lookup is by the upper-cased code; popping it clears the old value.
        return self._untagged_response(code, [None], code.upper())
# IMAP4 commands
def append(self, mailbox, flags, date_time, message):
"""Append message to named mailbox.
(typ, [data]) = <instance>.append(mailbox, flags, date_time, message)
All args except `message' can be None.
"""
name = 'APPEND'
if not mailbox:
mailbox = 'INBOX'
if flags:
if (flags[0],flags[-1]) != ('(',')'):
flags = '(%s)' % flags
else:
flags = None
if date_time:
date_time = Time2Internaldate(date_time)
else:
date_time = None
self.literal = MapCRLF.sub(CRLF, message)
return self._simple_command(name, mailbox, flags, date_time)
    def authenticate(self, mechanism, authobject):
        """Authenticate command - requires response processing.

        'mechanism' specifies which authentication mechanism is to
        be used - it must appear in <instance>.capabilities in the
        form AUTH=<mechanism>.

        'authobject' must be a callable object:

                data = authobject(response)

        It will be called to process server continuation responses.
        It should return data that will be encoded and sent to server.
        It should return None if the client abort response '*' should
        be sent instead.
        """
        mech = mechanism.upper()
        # XXX: shouldn't this code be removed, not commented out?
        #cap = 'AUTH=%s' % mech
        #if not cap in self.capabilities:       # Let the server decide!
        #    raise self.error("Server doesn't allow %s authentication." % mech)
        # _Authenticator handles the base64 continuation dialogue; its
        # process method is invoked as the command "literal".
        self.literal = _Authenticator(authobject).process
        typ, dat = self._simple_command('AUTHENTICATE', mech)
        if typ != 'OK':
            raise self.error(dat[-1])
        self.state = 'AUTH'
        return typ, dat
    def check(self):
        """Checkpoint mailbox on server.

        (typ, [data]) = <instance>.check()
        """
        return self._simple_command('CHECK')
    def close(self):
        """Close currently selected mailbox.

        Deleted messages are removed from writable mailbox.
        This is the recommended command before 'LOGOUT'.

        (typ, [data]) = <instance>.close()
        """
        try:
            typ, dat = self._simple_command('CLOSE')
        finally:
            # Whatever the outcome, we are no longer in the SELECTED state.
            self.state = 'AUTH'
        return typ, dat
    def copy(self, message_set, new_mailbox):
        """Copy 'message_set' messages onto end of 'new_mailbox'.

        (typ, [data]) = <instance>.copy(message_set, new_mailbox)
        """
        return self._simple_command('COPY', message_set, new_mailbox)
    def create(self, mailbox):
        """Create new mailbox.

        (typ, [data]) = <instance>.create(mailbox)
        """
        return self._simple_command('CREATE', mailbox)
    def delete(self, mailbox):
        """Delete old mailbox.

        (typ, [data]) = <instance>.delete(mailbox)
        """
        return self._simple_command('DELETE', mailbox)
    def deleteacl(self, mailbox, who):
        """Delete the ACLs (remove any rights) set for who on mailbox.

        (typ, [data]) = <instance>.deleteacl(mailbox, who)
        """
        return self._simple_command('DELETEACL', mailbox, who)
    def expunge(self):
        """Permanently remove deleted items from selected mailbox.

        Generates 'EXPUNGE' response for each deleted message.

        (typ, [data]) = <instance>.expunge()

        'data' is list of 'EXPUNGE'd message numbers in order received.
        """
        name = 'EXPUNGE'
        typ, dat = self._simple_command(name)
        return self._untagged_response(typ, dat, name)
    def fetch(self, message_set, message_parts):
        """Fetch (parts of) messages.

        (typ, [data, ...]) = <instance>.fetch(message_set, message_parts)

        'message_parts' should be a string of selected parts
        enclosed in parentheses, eg: "(UID BODY[TEXT])".

        'data' are tuples of message part envelope and data.
        """
        name = 'FETCH'
        typ, dat = self._simple_command(name, message_set, message_parts)
        return self._untagged_response(typ, dat, name)
    def getacl(self, mailbox):
        """Get the ACLs for a mailbox.

        (typ, [data]) = <instance>.getacl(mailbox)
        """
        # Results arrive as untagged 'ACL' responses.
        typ, dat = self._simple_command('GETACL', mailbox)
        return self._untagged_response(typ, dat, 'ACL')
    def getquota(self, root):
        """Get the quota root's resource usage and limits.

        Part of the IMAP4 QUOTA extension defined in rfc2087.

        (typ, [data]) = <instance>.getquota(root)
        """
        typ, dat = self._simple_command('GETQUOTA', root)
        return self._untagged_response(typ, dat, 'QUOTA')
    def getquotaroot(self, mailbox):
        """Get the list of quota roots for the named mailbox.

        (typ, [[QUOTAROOT responses...], [QUOTA responses]]) = <instance>.getquotaroot(mailbox)
        """
        typ, dat = self._simple_command('GETQUOTAROOT', mailbox)
        # The reply carries both QUOTA and QUOTAROOT untagged responses;
        # extract each kind separately from the same completion.
        typ, quota = self._untagged_response(typ, dat, 'QUOTA')
        typ, quotaroot = self._untagged_response(typ, dat, 'QUOTAROOT')
        return typ, [quotaroot, quota]
    def list(self, directory='""', pattern='*'):
        """List mailbox names in directory matching pattern.

        (typ, [data]) = <instance>.list(directory='""', pattern='*')

        'data' is list of LIST responses.
        """
        name = 'LIST'
        typ, dat = self._simple_command(name, directory, pattern)
        return self._untagged_response(typ, dat, name)
    def login(self, user, password):
        """Identify client using plaintext password.

        (typ, [data]) = <instance>.login(user, password)

        NB: 'password' will be quoted.  On success the connection
        moves to the 'AUTH' state.
        """
        typ, dat = self._simple_command('LOGIN', user, self._quote(password))
        if typ != 'OK':
            raise self.error(dat[-1])
        self.state = 'AUTH'
        return typ, dat
    def login_cram_md5(self, user, password):
        """Force use of CRAM-MD5 authentication.

        (typ, [data]) = <instance>.login_cram_md5(user, password)
        """
        # Stash the credentials for the _CRAM_MD5_AUTH authobject, which
        # is invoked by authenticate() with the server's challenge.
        self.user, self.password = user, password
        return self.authenticate('CRAM-MD5', self._CRAM_MD5_AUTH)
    def _CRAM_MD5_AUTH(self, challenge):
        """Authobject to use with CRAM-MD5 authentication.

        Returns the RFC 2195 response: "user <hex HMAC-MD5 digest of
        challenge keyed by password>".
        """
        import hmac
        return self.user + " " + hmac.HMAC(self.password, challenge).hexdigest()
    def logout(self):
        """Shutdown connection to server.

        (typ, [data]) = <instance>.logout()

        Returns server 'BYE' response.
        """
        self.state = 'LOGOUT'
        # Deliberately best-effort: any failure while sending LOGOUT is
        # converted into a 'NO' result so the socket still gets closed.
        try: typ, dat = self._simple_command('LOGOUT')
        except: typ, dat = 'NO', ['%s: %s' % sys.exc_info()[:2]]
        self.shutdown()
        if 'BYE' in self.untagged_responses:
            return 'BYE', self.untagged_responses['BYE']
        return typ, dat
    def lsub(self, directory='""', pattern='*'):
        """List 'subscribed' mailbox names in directory matching pattern.

        (typ, [data, ...]) = <instance>.lsub(directory='""', pattern='*')

        'data' is a list of LSUB responses.
        """
        name = 'LSUB'
        typ, dat = self._simple_command(name, directory, pattern)
        return self._untagged_response(typ, dat, name)
    def myrights(self, mailbox):
        """Show my ACLs for a mailbox (i.e. the rights that I have on mailbox).

        (typ, [data]) = <instance>.myrights(mailbox)
        """
        typ,dat = self._simple_command('MYRIGHTS', mailbox)
        return self._untagged_response(typ, dat, 'MYRIGHTS')
    def namespace(self):
        """Returns IMAP namespaces ala rfc2342.

        (typ, [data, ...]) = <instance>.namespace()
        """
        name = 'NAMESPACE'
        typ, dat = self._simple_command(name)
        return self._untagged_response(typ, dat, name)
    def noop(self):
        """Send NOOP command.

        (typ, [data]) = <instance>.noop()

        Useful as a keep-alive and to prompt the server to deliver any
        pending untagged responses.
        """
        if __debug__:
            if self.debug >= 3:
                self._dump_ur(self.untagged_responses)
        return self._simple_command('NOOP')
    def partial(self, message_num, message_part, start, length):
        """Fetch truncated part of a message.

        (typ, [data, ...]) = <instance>.partial(message_num, message_part, start, length)

        'data' is tuple of message part envelope and data.
        """
        name = 'PARTIAL'
        typ, dat = self._simple_command(name, message_num, message_part, start, length)
        # The server files PARTIAL results as FETCH untagged responses.
        return self._untagged_response(typ, dat, 'FETCH')
def proxyauth(self, user):
"""Assume authentication as "user".
Allows an authorised administrator to proxy into any user's
mailbox.
(typ, [data]) = <instance>.proxyauth(user)
"""
name = 'PROXYAUTH'
return self._simple_command('PROXYAUTH', user)
    def rename(self, oldmailbox, newmailbox):
        """Rename old mailbox name to new.

        (typ, [data]) = <instance>.rename(oldmailbox, newmailbox)
        """
        return self._simple_command('RENAME', oldmailbox, newmailbox)
    def search(self, charset, *criteria):
        """Search mailbox for matching messages.

        (typ, [data]) = <instance>.search(charset, criterion, ...)

        'data' is space separated list of matching message numbers.
        Pass charset=None to omit the CHARSET specification.
        """
        name = 'SEARCH'
        if charset:
            typ, dat = self._simple_command(name, 'CHARSET', charset, *criteria)
        else:
            typ, dat = self._simple_command(name, *criteria)
        return self._untagged_response(typ, dat, name)
    def select(self, mailbox='INBOX', readonly=None):
        """Select a mailbox.

        Flush all untagged responses.

        (typ, [data]) = <instance>.select(mailbox='INBOX', readonly=None)

        'data' is count of messages in mailbox ('EXISTS' response).

        Mandated responses are ('FLAGS', 'EXISTS', 'RECENT', 'UIDVALIDITY'), so
        other responses should be obtained via <instance>.response('FLAGS') etc.
        """
        self.untagged_responses = {}    # Flush old responses.
        self.is_readonly = readonly
        # A non-None 'readonly' requests EXAMINE (read-only open).
        if readonly is not None:
            name = 'EXAMINE'
        else:
            name = 'SELECT'
        typ, dat = self._simple_command(name, mailbox)
        if typ != 'OK':
            self.state = 'AUTH'     # Might have been 'SELECTED'
            return typ, dat
        self.state = 'SELECTED'
        # Server forced read-only although the caller wanted read-write.
        if 'READ-ONLY' in self.untagged_responses \
                and not readonly:
            if __debug__:
                if self.debug >= 1:
                    self._dump_ur(self.untagged_responses)
            raise self.readonly('%s is not writable' % mailbox)
        return typ, self.untagged_responses.get('EXISTS', [None])
    def setacl(self, mailbox, who, what):
        """Set a mailbox acl.

        (typ, [data]) = <instance>.setacl(mailbox, who, what)
        """
        return self._simple_command('SETACL', mailbox, who, what)
    def setquota(self, root, limits):
        """Set the quota root's resource limits.

        (typ, [data]) = <instance>.setquota(root, limits)
        """
        typ, dat = self._simple_command('SETQUOTA', root, limits)
        return self._untagged_response(typ, dat, 'QUOTA')
    def sort(self, sort_criteria, charset, *search_criteria):
        """IMAP4rev1 extension SORT command.

        (typ, [data]) = <instance>.sort(sort_criteria, charset, search_criteria, ...)
        """
        name = 'SORT'
        #if not name in self.capabilities:      # Let the server decide!
        #       raise self.error('unimplemented extension command: %s' % name)
        # Sort criteria must be transmitted as a parenthesized list.
        if (sort_criteria[0],sort_criteria[-1]) != ('(',')'):
            sort_criteria = '(%s)' % sort_criteria
        typ, dat = self._simple_command(name, sort_criteria, charset, *search_criteria)
        return self._untagged_response(typ, dat, name)
    def status(self, mailbox, names):
        """Request named status conditions for mailbox.

        (typ, [data]) = <instance>.status(mailbox, names)
        """
        name = 'STATUS'
        #if self.PROTOCOL_VERSION == 'IMAP4':   # Let the server decide!
        #    raise self.error('%s unimplemented in IMAP4 (obtain IMAP4rev1 server, or re-code)' % name)
        typ, dat = self._simple_command(name, mailbox, names)
        return self._untagged_response(typ, dat, name)
    def store(self, message_set, command, flags):
        """Alters flag dispositions for messages in mailbox.

        (typ, [data]) = <instance>.store(message_set, command, flags)
        """
        if (flags[0],flags[-1]) != ('(',')'):
            flags = '(%s)' % flags  # Avoid quoting the flags
        typ, dat = self._simple_command('STORE', message_set, command, flags)
        # Flag changes come back as untagged FETCH responses.
        return self._untagged_response(typ, dat, 'FETCH')
    def subscribe(self, mailbox):
        """Subscribe to new mailbox.

        (typ, [data]) = <instance>.subscribe(mailbox)
        """
        return self._simple_command('SUBSCRIBE', mailbox)
    def thread(self, threading_algorithm, charset, *search_criteria):
        """IMAP4rev1 extension THREAD command.

        (type, [data]) = <instance>.thread(threading_algorithm, charset, search_criteria, ...)
        """
        name = 'THREAD'
        typ, dat = self._simple_command(name, threading_algorithm, charset, *search_criteria)
        return self._untagged_response(typ, dat, name)
    def uid(self, command, *args):
        """Execute "command arg ..." with messages identified by UID,
                rather than message number.

        (typ, [data]) = <instance>.uid(command, arg1, arg2, ...)

        Returns response appropriate to 'command'.
        """
        command = command.upper()
        if not command in Commands:
            raise self.error("Unknown IMAP4 UID command: %s" % command)
        if self.state not in Commands[command]:
            raise self.error('command %s illegal in state %s'
                                    % (command, self.state))
        name = 'UID'
        typ, dat = self._simple_command(name, command, *args)
        # SEARCH/SORT file their untagged responses under their own
        # names; all other UID commands produce FETCH responses.
        if command in ('SEARCH', 'SORT'):
            name = command
        else:
            name = 'FETCH'
        return self._untagged_response(typ, dat, name)
    def unsubscribe(self, mailbox):
        """Unsubscribe from old mailbox.

        (typ, [data]) = <instance>.unsubscribe(mailbox)
        """
        return self._simple_command('UNSUBSCRIBE', mailbox)
    def xatom(self, name, *args):
        """Allow simple extension commands
                notified by server in CAPABILITY response.

        Assumes command is legal in current state.

        (typ, [data]) = <instance>.xatom(name, arg, ...)

        Returns response appropriate to extension command `name'.
        """
        name = name.upper()
        #if not name in self.capabilities:      # Let the server decide!
        #    raise self.error('unknown extension command: %s' % name)
        # Register unknown commands as legal in the current state so the
        # _command() state check passes.
        if not name in Commands:
            Commands[name] = (self.state,)
        return self._simple_command(name, *args)
# Private methods
    def _append_untagged(self, typ, dat):
        # File an untagged response (or (envelope, literal) tuple) under
        # its type, appending to any pending responses of the same type.
        if dat is None: dat = ''
        ur = self.untagged_responses
        if __debug__:
            if self.debug >= 5:
                self._mesg('untagged_responses[%s] %s += ["%s"]' %
                        (typ, len(ur.get(typ,'')), dat))
        if typ in ur:
            ur[typ].append(dat)
        else:
            ur[typ] = [dat]
    def _check_bye(self):
        # Raise 'abort' if the server has announced an untagged BYE
        # (connection is going away).
        bye = self.untagged_responses.get('BYE')
        if bye:
            raise self.abort(bye[-1])
    def _command(self, name, *args):
        # Format and transmit one command, returning its tag.  Handles
        # argument quoting and an optional literal held in self.literal,
        # which may itself be a callable "literator" that produces the
        # literal from each continuation response.
        if self.state not in Commands[name]:
            self.literal = None
            raise self.error(
            'command %s illegal in state %s' % (name, self.state))

        # Discard stale completion-status untagged responses.
        for typ in ('OK', 'NO', 'BAD'):
            if typ in self.untagged_responses:
                del self.untagged_responses[typ]

        if 'READ-ONLY' in self.untagged_responses \
        and not self.is_readonly:
            raise self.readonly('mailbox status changed to READ-ONLY')

        tag = self._new_tag()
        data = '%s %s' % (tag, name)
        for arg in args:
            if arg is None: continue
            data = '%s %s' % (data, self._checkquote(arg))

        literal = self.literal
        if literal is not None:
            self.literal = None
            if type(literal) is type(self._command):
                # Callable: defer literal generation until the server's
                # continuation arrives (used by authenticate()).
                literator = literal
            else:
                literator = None
                data = '%s {%s}' % (data, len(literal))

        if __debug__:
            if self.debug >= 4:
                self._mesg('> %s' % data)
            else:
                self._log('> %s' % data)

        try:
            self.send('%s%s' % (data, CRLF))
        except (socket.error, OSError), val:
            raise self.abort('socket error: %s' % val)

        if literal is None:
            return tag

        while 1:
            # Wait for continuation response

            while self._get_response():
                if self.tagged_commands[tag]:   # BAD/NO?
                    return tag

            # Send literal

            if literator:
                literal = literator(self.continuation_response)

            if __debug__:
                if self.debug >= 4:
                    self._mesg('write literal size %s' % len(literal))

            try:
                self.send(literal)
                self.send(CRLF)
            except (socket.error, OSError), val:
                raise self.abort('socket error: %s' % val)

            # A plain (non-callable) literal is sent exactly once; a
            # literator keeps answering continuations until the server
            # completes the command.
            if not literator:
                break

        return tag
    def _command_complete(self, name, tag):
        # Wait for the tagged completion of 'tag', converting server BYE
        # and protocol errors into exceptions that name the command.
        self._check_bye()
        try:
            typ, data = self._get_tagged_response(tag)
        except self.abort, val:
            raise self.abort('command: %s => %s' % (name, val))
        except self.error, val:
            raise self.error('command: %s => %s' % (name, val))
        self._check_bye()
        if typ == 'BAD':
            raise self.error('%s command error: %s %s' % (name, typ, data))
        return typ, data
    def _get_response(self):

        # Read response and store.
        #
        # Returns None for continuation responses,
        # otherwise first response line received.

        resp = self._get_line()

        # Command completion response?
        if self._match(self.tagre, resp):
            tag = self.mo.group('tag')
            if not tag in self.tagged_commands:
                raise self.abort('unexpected tagged response: %s' % resp)

            typ = self.mo.group('type')
            dat = self.mo.group('data')
            self.tagged_commands[tag] = (typ, [dat])
        else:
            dat2 = None

            # '*' (untagged) responses?
            if not self._match(Untagged_response, resp):
                if self._match(Untagged_status, resp):
                    dat2 = self.mo.group('data2')

            if self.mo is None:
                # Only other possibility is '+' (continuation) response...
                if self._match(Continuation, resp):
                    self.continuation_response = self.mo.group('data')
                    return None     # NB: indicates continuation
                raise self.abort("unexpected response: '%s'" % resp)

            typ = self.mo.group('type')
            dat = self.mo.group('data')
            if dat is None: dat = ''        # Null untagged response
            if dat2: dat = dat + ' ' + dat2

            # Is there a literal to come?
            # Consume each "{size}" marker by reading that many raw bytes
            # from the connection, then keep parsing the trailer line.
            while self._match(Literal, dat):

                # Read literal direct from connection.

                size = int(self.mo.group('size'))
                if __debug__:
                    if self.debug >= 4:
                        self._mesg('read literal size %s' % size)
                data = self.read(size)

                # Store response with literal as tuple

                self._append_untagged(typ, (dat, data))

                # Read trailer - possibly containing another literal

                dat = self._get_line()

            self._append_untagged(typ, dat)

        # Bracketed response information?

        if typ in ('OK', 'NO', 'BAD') and self._match(Response_code, dat):
            self._append_untagged(self.mo.group('type'), self.mo.group('data'))

        if __debug__:
            if self.debug >= 1 and typ in ('NO', 'BAD', 'BYE'):
                self._mesg('%s response: %s' % (typ, dat))

        return resp
    def _get_tagged_response(self, tag):
        # Keep reading responses until the completion for 'tag' arrives,
        # then remove it from the pending table and return it.
        while 1:
            result = self.tagged_commands[tag]
            if result is not None:
                del self.tagged_commands[tag]
                return result

            # Some have reported "unexpected response" exceptions.
            # Note that ignoring them here causes loops.
            # Instead, send me details of the unexpected response and
            # I'll update the code in `_get_response()'.

            try:
                self._get_response()
            except self.abort, val:
                if __debug__:
                    if self.debug >= 1:
                        self.print_log()
                raise
    def _get_line(self):

        line = self.readline()
        if not line:
            raise self.abort('socket error: EOF')

        # Protocol mandates all lines terminated by CRLF; strip it.
        line = line[:-2]
        if __debug__:
            if self.debug >= 4:
                self._mesg('< %s' % line)
            else:
                self._log('< %s' % line)
        return line
    def _match(self, cre, s):

        # Run compiled regular expression match method on 's'.
        # Save result in self.mo for later group extraction;
        # return success.

        self.mo = cre.match(s)
        if __debug__:
            if self.mo is not None and self.debug >= 5:
                self._mesg("\tmatched r'%s' => %r" % (cre.pattern, self.mo.groups()))
        return self.mo is not None
    def _new_tag(self):
        # Allocate the next command tag and register it as pending
        # (value stays None until the tagged completion arrives).
        tag = '%s%s' % (self.tagpre, self.tagnum)
        self.tagnum = self.tagnum + 1
        self.tagged_commands[tag] = None
        return tag
    def _checkquote(self, arg):

        # Must quote command args if non-alphanumeric chars present,
        # and not already quoted.  Non-string arguments pass through
        # untouched; already-parenthesized or already-quoted strings
        # are left alone.

        if type(arg) is not type(''):
            return arg
        if len(arg) >= 2 and (arg[0],arg[-1]) in (('(',')'),('"','"')):
            return arg
        if arg and self.mustquote.search(arg) is None:
            return arg
        return self._quote(arg)
    def _quote(self, arg):
        # Escape backslashes and double quotes, then wrap in double
        # quotes to produce an IMAP quoted string.
        arg = arg.replace('\\', '\\\\')
        arg = arg.replace('"', '\\"')

        return '"%s"' % arg
    def _simple_command(self, name, *args):
        # Send the command and block until its tagged completion.
        return self._command_complete(name, self._command(name, *args))
    def _untagged_response(self, typ, dat, name):
        # Return (and consume) the untagged responses filed under 'name'.
        # A 'NO' completion passes straight through; absence of any
        # matching response yields [None].
        if typ == 'NO':
            return typ, dat
        if not name in self.untagged_responses:
            return typ, [None]
        data = self.untagged_responses.pop(name)
        if __debug__:
            if self.debug >= 5:
                self._mesg('untagged_responses[%s] => %s' % (name, data))
        return typ, data
    if __debug__:

        # Debug helpers are only defined when Python is not running
        # with -O (i.e. __debug__ is true).

        def _mesg(self, s, secs=None):
            # Print a timestamped debug message to stderr.
            if secs is None:
                secs = time.time()
            tm = time.strftime('%M:%S', time.localtime(secs))
            sys.stderr.write('  %s.%02d %s\n' % (tm, (secs*100)%100, s))
            sys.stderr.flush()

        def _dump_ur(self, dict):
            # Dump untagged responses (in `dict').
            l = dict.items()
            if not l: return
            t = '\n\t\t'
            l = map(lambda x:'%s: "%s"' % (x[0], x[1][0] and '" "'.join(x[1]) or ''), l)
            self._mesg('untagged responses dump:%s%s' % (t, t.join(l)))

        def _log(self, line):
            # Keep log of last `_cmd_log_len' interactions for debugging.
            # The log is a fixed-size ring indexed by _cmd_log_idx.
            self._cmd_log[self._cmd_log_idx] = (line, time.time())
            self._cmd_log_idx += 1
            if self._cmd_log_idx >= self._cmd_log_len:
                self._cmd_log_idx = 0

        def print_log(self):
            # Replay the ring buffer starting from the oldest entry.
            self._mesg('last %d IMAP4 interactions:' % len(self._cmd_log))
            i, n = self._cmd_log_idx, self._cmd_log_len
            while n:
                try:
                    self._mesg(*self._cmd_log[i])
                except:
                    pass
                i += 1
                if i >= self._cmd_log_len:
                    i = 0
                n -= 1
class IMAP4_SSL(IMAP4):

    """IMAP4 client class over SSL connection

    Instantiate with: IMAP4_SSL([host[, port[, keyfile[, certfile]]]])

            host - host's name (default: localhost);
            port - port number (default: standard IMAP4 SSL port).
            keyfile - PEM formatted file that contains your private key (default: None);
            certfile - PEM formatted certificate chain file (default: None);

    for more documentation see the docstring of the parent class IMAP4.
    """

    def __init__(self, host = '', port = IMAP4_SSL_PORT, keyfile = None, certfile = None):
        self.keyfile = keyfile
        self.certfile = certfile
        IMAP4.__init__(self, host, port)

    def open(self, host = '', port = IMAP4_SSL_PORT):
        """Setup connection to remote server on "host:port".
            (default: localhost:standard IMAP4 SSL port).
        This connection will be used by the routines:
            read, readline, send, shutdown.
        """
        self.host = host
        self.port = port
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((host, port))
        self.sslobj = socket.ssl(self.sock, self.keyfile, self.certfile)

    def read(self, size):
        """Read 'size' bytes from remote."""
        # sslobj.read() sometimes returns < size bytes
        # NOTE(review): if the peer closes the connection mid-read this
        # loop presumably spins on empty reads forever — verify against
        # the socket.ssl semantics in use.
        chunks = []
        read = 0
        while read < size:
            data = self.sslobj.read(size-read)
            read += len(data)
            chunks.append(data)

        return ''.join(chunks)

    def readline(self):
        """Read line from remote."""
        # NB: socket.ssl needs a "readline" method, or perhaps a "makefile" method.
        line = []
        while 1:
            char = self.sslobj.read(1)
            line.append(char)
            if char == "\n": return ''.join(line)

    def send(self, data):
        """Send data to remote."""
        # NB: socket.ssl needs a "sendall" method to match socket objects.
        # Loop because sslobj.write() may send only part of the buffer.
        bytes = len(data)
        while bytes > 0:
            sent = self.sslobj.write(data)
            if sent == bytes:
                break    # avoid copy
            data = data[sent:]
            bytes = bytes - sent

    def shutdown(self):
        """Close I/O established in "open"."""
        self.sock.close()

    def socket(self):
        """Return socket instance used to connect to IMAP4 server.

        socket = <instance>.socket()
        """
        return self.sock

    def ssl(self):
        """Return SSLObject instance used to communicate with the IMAP4 server.

        ssl = <instance>.socket.ssl()
        """
        return self.sslobj
class IMAP4_stream(IMAP4):

    """IMAP4 client class over a stream

    Instantiate with: IMAP4_stream(command)

            where "command" is a string that can be passed to os.popen2()

    for more documentation see the docstring of the parent class IMAP4.
    """

    def __init__(self, command):
        self.command = command
        IMAP4.__init__(self)

    def open(self, host = None, port = None):
        """Setup a stream connection.
        This connection will be used by the routines:
            read, readline, send, shutdown.
        """
        self.host = None        # For compatibility with parent class
        self.port = None
        self.sock = None
        self.file = None
        # The subprocess's stdin/stdout become our write/read channels.
        self.writefile, self.readfile = os.popen2(self.command)

    def read(self, size):
        """Read 'size' bytes from remote."""
        return self.readfile.read(size)

    def readline(self):
        """Read line from remote."""
        return self.readfile.readline()

    def send(self, data):
        """Send data to remote."""
        self.writefile.write(data)
        self.writefile.flush()

    def shutdown(self):
        """Close I/O established in "open"."""
        self.readfile.close()
        self.writefile.close()
class _Authenticator:

    """Private class to provide en/decoding
            for base64-based authentication conversation.
    """

    def __init__(self, mechinst):
        self.mech = mechinst    # Callable object to provide/process data

    def process(self, data):
        # Decode the server's base64 challenge, hand it to the mechanism
        # callable, and return its base64-encoded reply ('*' aborts).
        ret = self.mech(self.decode(data))
        if ret is None:
            return '*'      # Abort conversation
        return self.encode(ret)

    def encode(self, inp):
        #
        #  Invoke binascii.b2a_base64 iteratively with
        #  short even length buffers, strip the trailing
        #  line feed from the result and append.  "Even"
        #  means a number that factors to both 6 and 8,
        #  so when it gets to the end of the 8-bit input
        #  there's no partial 6-bit output.
        #
        oup = ''
        while inp:
            if len(inp) > 48:
                t = inp[:48]
                inp = inp[48:]
            else:
                t = inp
                inp = ''
            e = binascii.b2a_base64(t)
            if e:
                oup = oup + e[:-1]
        return oup

    def decode(self, inp):
        if not inp:
            return ''
        return binascii.a2b_base64(inp)
# Month-abbreviation to month-number lookup used by Internaldate2tuple.
Mon2num = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,
        'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12}

def Internaldate2tuple(resp):
    """Convert IMAP4 INTERNALDATE to UT.

    Returns Python time module tuple, or None if 'resp' does not match
    the INTERNALDATE format.
    """

    mo = InternalDate.match(resp)
    if not mo:
        return None

    mon = Mon2num[mo.group('mon')]
    zonen = mo.group('zonen')

    day = int(mo.group('day'))
    year = int(mo.group('year'))
    hour = int(mo.group('hour'))
    min = int(mo.group('min'))
    sec = int(mo.group('sec'))
    zoneh = int(mo.group('zoneh'))
    zonem = int(mo.group('zonem'))

    # INTERNALDATE timezone must be subtracted to get UT

    zone = (zoneh*60 + zonem)*60
    if zonen == '-':
        zone = -zone

    tt = (year, mon, day, hour, min, sec, -1, -1, -1)

    utc = time.mktime(tt)

    # Following is necessary because the time module has no 'mkgmtime'.
    # 'mktime' assumes arg in local timezone, so adds timezone/altzone.

    lt = time.localtime(utc)
    if time.daylight and lt[-1]:
        zone = zone + time.altzone
    else:
        zone = zone + time.timezone

    return time.localtime(utc - zone)
def Int2AP(num):
    """Convert an integer to its base-16 representation using the
    letters A-P as digits (A=0 ... P=15).

    The sign of 'num' is ignored; 0 yields the empty string.
    """
    alphabet = 'ABCDEFGHIJKLMNOP'
    remaining = int(abs(num))
    digits = []
    while remaining:
        remaining, digit = divmod(remaining, 16)
        digits.append(alphabet[digit])
    return ''.join(reversed(digits))
def ParseFlags(resp):
    """Convert an IMAP4 flags response to a Python tuple of flag atoms.

    Returns () when 'resp' does not match the FLAGS response format.
    """
    match = Flags.match(resp)
    if match is None:
        return ()
    return tuple(match.group('flags').split())
def Time2Internaldate(date_time):
    """Convert 'date_time' to IMAP4 INTERNALDATE representation.

    Accepts a Unix timestamp (int/float, interpreted in local time), a
    time tuple/struct_time, or an already-formatted
    '"DD-Mmm-YYYY HH:MM:SS +HHMM"' string (returned unchanged).
    Raises ValueError for any other type.
    """
    if isinstance(date_time, (int, float)):
        tt = time.localtime(date_time)
    elif isinstance(date_time, (tuple, time.struct_time)):
        tt = date_time
    elif isinstance(date_time, str) and (date_time[0], date_time[-1]) == ('"', '"'):
        return date_time        # already in INTERNALDATE form
    else:
        raise ValueError("date_time not of a known type")

    stamp = time.strftime("%d-%b-%Y %H:%M:%S", tt)
    if stamp.startswith('0'):
        # Day-of-month is space padded, not zero padded.
        stamp = ' ' + stamp[1:]
    # UTC offset: honour DST when the tuple says it is in effect.
    if time.daylight and tt[-1]:
        offset = -time.altzone
    else:
        offset = -time.timezone
    return '"%s %+03d%02d"' % ((stamp,) + divmod(offset // 60, 60))
if __name__ == '__main__':

    # Smoke-test harness, not part of the library API.
    # To test: invoke either as 'python imaplib.py [IMAP4_server_hostname]'
    # or 'python imaplib.py -s "rsh IMAP4_server_hostname exec /etc/rimapd"'
    # to test the IMAP4_stream class

    import getopt, getpass

    try:
        optlist, args = getopt.getopt(sys.argv[1:], 'd:s:')
    except getopt.error, val:
        optlist, args = (), ()

    # -d N sets the debug level; -s CMD selects the stream transport.
    stream_command = None
    for opt,val in optlist:
        if opt == '-d':
            Debug = int(val)
        elif opt == '-s':
            stream_command = val
            if not args: args = (stream_command,)

    if not args: args = ('',)

    host = args[0]

    USER = getpass.getuser()
    PASSWD = getpass.getpass("IMAP password for %s on %s: " % (USER, host or "localhost"))

    # Sequence 1 exercises mailbox management; sequence 2 exercises
    # UID-based access.  Each entry is (method-name, args-tuple).
    test_mesg = 'From: %(user)s@localhost%(lf)sSubject: IMAP4 test%(lf)s%(lf)sdata...%(lf)s' % {'user':USER, 'lf':'\n'}
    test_seq1 = (
    ('login', (USER, PASSWD)),
    ('create', ('/tmp/xxx 1',)),
    ('rename', ('/tmp/xxx 1', '/tmp/yyy')),
    ('CREATE', ('/tmp/yyz 2',)),
    ('append', ('/tmp/yyz 2', None, None, test_mesg)),
    ('list', ('/tmp', 'yy*')),
    ('select', ('/tmp/yyz 2',)),
    ('search', (None, 'SUBJECT', 'test')),
    ('fetch', ('1', '(FLAGS INTERNALDATE RFC822)')),
    ('store', ('1', 'FLAGS', '(\Deleted)')),
    ('namespace', ()),
    ('expunge', ()),
    ('recent', ()),
    ('close', ()),
    )

    test_seq2 = (
    ('select', ()),
    ('response',('UIDVALIDITY',)),
    ('uid', ('SEARCH', 'ALL')),
    ('response', ('EXISTS',)),
    ('append', (None, None, None, test_mesg)),
    ('recent', ()),
    ('logout', ()),
    )

    def run(cmd, args):
        # Invoke one client method, echoing the call and its result;
        # a 'NO' completion is treated as a test failure.
        M._mesg('%s %s' % (cmd, args))
        typ, dat = getattr(M, cmd)(*args)
        M._mesg('%s => %s %s' % (cmd, typ, dat))
        if typ == 'NO': raise dat[0]
        return dat

    try:
        if stream_command:
            M = IMAP4_stream(stream_command)
        else:
            M = IMAP4(host)
        if M.state == 'AUTH':
            test_seq1 = test_seq1[1:]   # Login not needed
        M._mesg('PROTOCOL_VERSION = %s' % M.PROTOCOL_VERSION)
        M._mesg('CAPABILITIES = %r' % (M.capabilities,))

        for cmd,args in test_seq1:
            run(cmd, args)

        # Clean up the temporary mailboxes created above.
        for ml in run('list', ('/tmp/', 'yy%')):
            mo = re.match(r'.*"([^"]+)"$', ml)
            if mo: path = mo.group(1)
            else: path = ml.split()[-1]
            run('delete', (path,))

        for cmd,args in test_seq2:
            dat = run(cmd, args)

            if (cmd,args) != ('uid', ('SEARCH', 'ALL')):
                continue

            uid = dat[-1].split()
            if not uid: continue
            run('uid', ('FETCH', '%s' % uid[-1],
                    '(FLAGS INTERNALDATE RFC822.SIZE RFC822.HEADER RFC822.TEXT)'))

        print '\nAll tests OK.'

    except:
        print '\nTests failed.'

        if not Debug:
            print '''
If you would like to see debugging output,
try: %s -d5
''' % sys.argv[0]

        raise
| gpl-2.0 |
Kodextor/arkos-beacon | beacon/beacon.py | 1 | 2491 | #!/usr/bin/env python
import OpenSSL
import os
import pam
import json
import socket
import ssl
import subprocess
import threading
def shutdown():
    """Power the machine off by invoking the system `halt` command."""
    subprocess.call(['halt'])
def reload():
    """Restart the Genesis service via systemd."""
    subprocess.call(['systemctl', 'restart', 'genesis'])
def reboot():
    """Reboot the machine by invoking the system `reboot` command."""
    subprocess.call(['reboot'])
def handle_client(sock):
    """Serve a single beacon client connection.

    Reads one JSON request from 'sock' and writes one JSON reply.
    Unauthenticated requests: 'status' (hostname + Genesis state) and
    'ping'.  Privileged requests ('reload', 'shutdown', 'reboot') are
    checked against PAM with the supplied 'user'/'pass' credentials
    before the corresponding system action is run.

    NOTE(review): a single recv(4096) assumes the whole JSON payload
    arrives in one segment; a larger or fragmented request would fail
    to parse.
    """
    data = json.loads(sock.recv(4096))
    request = data['request']

    if request == 'status':
        # Genesis is considered active when its pidfile exists.
        with open('/etc/hostname') as f:
            hostname = f.readline().strip('\n')
        if os.path.exists('/var/run/genesis.pid'):
            status = 'active'
        else:
            status = 'inactive'
        sock.sendall(json.dumps({
            'response': 'ok',
            'name': hostname,
            'status': status,
        }))
    elif request == 'ping':
        sock.sendall(json.dumps({'response': 'ok'}))
    elif request in ('reload', 'shutdown', 'reboot'):
        # All privileged actions share the same authenticate / ack / act
        # sequence, so dispatch through a table instead of repeating it.
        action = {'reload': reload, 'shutdown': shutdown, 'reboot': reboot}[request]
        if pam.authenticate(data['user'], data['pass'], service='account'):
            sock.sendall(json.dumps({
                'response': 'ok',
            }))
            action()
        else:
            sock.sendall(json.dumps({
                'response': 'fail',
            }))
def gencert(sslcert, sslkey):
    """Generate a self-signed RSA key and certificate for the beacon.

    Writes the PEM-encoded certificate to 'sslcert' and the private key
    to 'sslkey', creating /etc/beacon first if necessary.

    NOTE(review): 1024-bit RSA with SHA-1 is weak by modern standards,
    and the certificate is written with no subject/issuer fields.
    """
    if not os.path.exists('/etc/beacon'):
        os.mkdir('/etc/beacon')
    k = OpenSSL.crypto.PKey()
    k.generate_key(OpenSSL.crypto.TYPE_RSA, 1024)
    crt = OpenSSL.crypto.X509()
    crt.set_serial_number(1)
    crt.gmtime_adj_notBefore(0)
    crt.gmtime_adj_notAfter(10*365*24*60*60)  # valid for ~10 years
    crt.set_pubkey(k)
    crt.sign(k, 'sha1')
    # Use context managers so the files are flushed and closed promptly
    # instead of relying on garbage collection of anonymous file objects.
    with open(sslcert, "wt") as certfile:
        certfile.write(
            OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, crt))
    with open(sslkey, "wt") as keyfile:
        keyfile.write(
            OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, k))
def serve_beacon():
    """Listen on 0.0.0.0:8765 and serve beacon clients over TLS, forever.

    Generates a self-signed certificate on first run, then accepts
    connections in a loop, wrapping each in TLS and handing it to
    handle_client() on a daemon thread.
    """
    sslcert = '/etc/beacon/cert.pem'
    sslkey = '/etc/beacon/pkey.key'
    if not os.path.exists(sslcert) or not os.path.exists(sslkey):
        gencert(sslcert, sslkey)
    s = socket.socket()
    s.bind(('0.0.0.0', 8765))
    s.listen(1)
    while True:
        conn, address = s.accept()
        # TLS handshake happens per-connection, server side.
        cstream = ssl.wrap_socket(conn, server_side=True, certfile=sslcert,
            keyfile=sslkey, ssl_version=ssl.PROTOCOL_TLSv1)
        thread = threading.Thread(target=handle_client, args=[cstream])
        thread.daemon = True
        thread.start()
| gpl-3.0 |
bsmr-eve/Pyfa | gui/builtinAdditionPanes/projectedView.py | 1 | 11834 | # =============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
# noinspection PyPackageRequirements
import wx
from logbook import Logger
import gui.builtinAdditionPanes.droneView
import gui.display as d
import gui.globalEvents as GE
from eos.saveddata.drone import Drone as es_Drone
from eos.saveddata.fighter import Fighter as es_Fighter
from eos.saveddata.module import Module as es_Module
from gui.builtinViewColumns.state import State
from gui.contextMenu import ContextMenu
from gui.utils.staticHelpers import DragDropHelper
from service.fit import Fit
from service.market import Market
import gui.fitCommands as cmd
pyfalog = Logger(__name__)
class DummyItem(object):
    """Minimal item placeholder for informational rows in the list.

    Mimics the attributes the display columns read from a real item:
    a display ``name`` and an ``iconID`` (always None, i.e. no icon).
    """

    def __init__(self, txt):
        self.iconID = None  # placeholder rows never have an icon
        self.name = txt
class DummyEntry(object):
    """Placeholder list row: wraps a DummyItem so display code can treat
    it like a real entry exposing an ``item`` attribute."""

    def __init__(self, txt):
        # The display only ever reads entry.item.<attr>, so a bare
        # DummyItem carrying the message text is sufficient.
        self.item = DummyItem(txt)
class ProjectedViewDrop(wx.DropTarget):
    """wx drop target for the projected view.

    Receives the text payload produced by pyfa's drag sources (a string
    of the form "source:payload"), splits it on ':' and forwards the
    pieces to the drop callback supplied by the owning view.
    """

    def __init__(self, dropFn, *args, **kwargs):
        super(ProjectedViewDrop, self).__init__(*args, **kwargs)
        self.dropFn = dropFn
        # this is really transferring an EVE itemID
        self.dropData = wx.TextDataObject()
        self.SetDataObject(self.dropData)

    def OnData(self, x, y, t):
        if self.GetData():
            # The real payload travels via DragDropHelper rather than the
            # wx data object — presumably to work around platform quirks;
            # verify against the drag sources before changing.
            dragged_data = DragDropHelper.data
            data = dragged_data.split(':')
            self.dropFn(x, y, data)
        return t
class ProjectedView(d.Display):
    # Columns shown by this display; names refer to the column
    # implementations in gui.builtinViewColumns.
    DEFAULT_COLS = ["State",
                    "Ammo Icon",
                    "Base Icon",
                    "Base Name",
                    "Ammo"]
    def __init__(self, parent):
        """Build the projected-items list control and wire up its events."""
        d.Display.__init__(self, parent, style=wx.LC_SINGLE_SEL | wx.BORDER_NONE)

        # Fit rendered on the previous update; lets fitChanged detect switches.
        self.lastFitId = None

        self.mainFrame.Bind(GE.FIT_CHANGED, self.fitChanged)
        self.Bind(wx.EVT_LEFT_DOWN, self.click)
        self.Bind(wx.EVT_RIGHT_DOWN, self.click)
        self.Bind(wx.EVT_LEFT_DCLICK, self.remove)
        self.Bind(wx.EVT_KEY_UP, self.kbEvent)

        self.droneView = gui.builtinAdditionPanes.droneView.DroneView

        # GTK delivers the context-menu click on button-up; other
        # platforms expect it on button-down.
        if "__WXGTK__" in wx.PlatformInfo:
            self.Bind(wx.EVT_RIGHT_UP, self.scheduleMenu)
        else:
            self.Bind(wx.EVT_RIGHT_DOWN, self.scheduleMenu)

        self.Bind(wx.EVT_LIST_BEGIN_DRAG, self.startDrag)
        self.SetDropTarget(ProjectedViewDrop(self.handleListDrag))
    def handleListDrag(self, x, y, data):
        """
        Handles dragging of items from various pyfa displays which support it

        data is list with two indices:
            data[0] is hard-coded str of originating source
            data[1] is typeID or index of data we want to manipulate
        """
        sFit = Fit.getInstance()
        fitID = self.mainFrame.getActiveFit()
        fit = sFit.getFit(self.mainFrame.getActiveFit())

        if data[0] == "projected":
            # if source is coming from projected, we are trying to combine drones.
            pass
            # removing merge functionality - if people complain about it, can add it back as a command
            # self.mergeDrones(x, y, int(data[1]))
        elif data[0] == "fitting":
            dstRow, _ = self.HitTest((x, y))

            # Gather module information to get position; data[1] is the
            # module's index in the source fit here, not a typeID.
            module = fit.modules[int(data[1])]

            self.mainFrame.command.Submit(cmd.GuiAddProjectedCommand(fitID, module.itemID, 'item'))
            # sFit.project(fit.ID, module)
            # wx.PostEvent(self.mainFrame, GE.FitChanged(fitID=fit.ID))
        elif data[0] == "market":
            # Market drags carry a typeID directly.
            # sFit = Fit.getInstance()
            self.mainFrame.command.Submit(cmd.GuiAddProjectedCommand(fitID, int(data[1]), 'item'))
            # sFit.project(fit.ID, int(data[1]))
            # wx.PostEvent(self.mainFrame, GE.FitChanged(fitID=fit.ID))
    def kbEvent(self, event):
        """Keyboard handler: Delete removes the selected projected entry."""
        keycode = event.GetKeyCode()
        if keycode == wx.WXK_DELETE or keycode == wx.WXK_NUMPAD_DELETE:
            fitID = self.mainFrame.getActiveFit()
            sFit = Fit.getInstance()
            row = self.GetFirstSelected()
            if row != -1:
                sFit.removeProjected(fitID, self.get(row))
                wx.PostEvent(self.mainFrame, GE.FitChanged(fitID=fitID))
    def handleDrag(self, type, fitID):
        """Project a dragged fit onto the currently active fit."""
        # Those are drags coming from pyfa sources, NOT builtin wx drags
        if type == "fit":
            activeFit = self.mainFrame.getActiveFit()
            if activeFit:
                self.mainFrame.command.Submit(cmd.GuiAddProjectedCommand(activeFit, fitID, 'fit'))
    def startDrag(self, event):
        """Begin a drag for a projected drone row.

        Only drone entries are draggable; the payload is the row's item
        data, tagged with the "projected" source marker.
        """
        row = event.GetIndex()

        if row != -1 and isinstance(self.get(row), es_Drone):
            data = wx.TextDataObject()
            dataStr = "projected:" + str(self.GetItemData(row))
            data.SetText(dataStr)

            dropSource = wx.DropSource(self)
            dropSource.SetData(data)
            # The reliable payload travels via DragDropHelper (see
            # ProjectedViewDrop.OnData).
            DragDropHelper.data = dataStr
            dropSource.DoDragDrop()
    def mergeDrones(self, x, y, itemID):
        """Merge the dragged drone stack into the stack at drop point (x, y)."""
        srcRow = self.FindItemData(-1, itemID)
        dstRow, _ = self.HitTest((x, y))
        if srcRow != -1 and dstRow != -1:
            self._merge(srcRow, dstRow)
    def _merge(self, src, dst):
        # Merge two projected drone rows; a no-op if the destination row
        # is not a drone.
        dstDrone = self.get(dst)
        if isinstance(dstDrone, es_Drone):
            sFit = Fit.getInstance()
            fitID = self.mainFrame.getActiveFit()
            if sFit.mergeDrones(fitID, self.get(src), dstDrone, True):
                wx.PostEvent(self.mainFrame, GE.FitChanged(fitID=fitID))
@staticmethod
def moduleSort(module):
return module.item.name
@staticmethod
def fighterSort(fighter):
return fighter.item.name
def droneSort(self, drone):
item = drone.item
if item.marketGroup is None:
item = item.metaGroup.parent
return (self.droneView.DRONE_ORDER.index(item.marketGroup.name),
drone.item.name)
@staticmethod
def fitSort(fit):
return fit.name
def fitChanged(self, event):
    """Rebuild the projected list when the active fit changes or is edited."""
    sFit = Fit.getInstance()
    fit = sFit.getFit(event.fitID)

    # pyfalog.debug("ProjectedView::fitChanged: {}", repr(fit))
    # structures cannot receive projected effects, so grey out the page
    self.Parent.Parent.DisablePage(self, not fit or fit.isStructure)

    # Clear list and get out if current fitId is None
    if event.fitID is None and self.lastFitId is not None:
        self.DeleteAllItems()
        self.lastFitId = None
        event.Skip()
        return

    stuff = []
    if fit is not None:
        # pyfalog.debug(" Collecting list of stuff to display in ProjectedView")
        # copy the lists so sorting below does not mutate the fit itself
        self.modules = fit.projectedModules[:]
        self.drones = fit.projectedDrones[:]
        self.fighters = fit.projectedFighters[:]
        self.fits = fit.projectedFits[:]

        self.modules.sort(key=self.moduleSort)
        self.drones.sort(key=self.droneSort)
        self.fighters.sort(key=self.fighterSort)
        self.fits.sort(key=self.fitSort)

        # display order: modules, drones, fighters, fits (get() relies on this)
        stuff.extend(self.modules)
        stuff.extend(self.drones)
        stuff.extend(self.fighters)
        stuff.extend(self.fits)

    if event.fitID != self.lastFitId:
        self.lastFitId = event.fitID
        item = self.GetNextItem(-1, wx.LIST_NEXT_ALL, wx.LIST_STATE_DONTCARE)
        if item != -1:
            self.EnsureVisible(item)
        self.deselectItems()

    if not stuff:
        # placeholder row shown when nothing is projected
        stuff = [DummyEntry("Drag an item or fit, or use right-click menu for wormhole effects")]

    self.update(stuff)
    event.Skip()
def get(self, row):
    """Map a list row back to the projected thing it displays, or None.

    Rows are laid out as modules, then drones, then fighters, then fits,
    matching the order fitChanged() fills the list in.
    """
    if row == -1:
        return None

    sections = (self.modules, self.drones, self.fighters, self.fits)
    if not any(sections):
        return None

    offset = row
    for section in sections[:-1]:
        if offset < len(section):
            return section[offset]
        offset -= len(section)
    return sections[-1][offset]
def click(self, event):
    """Mouse click handler: clicking the State column toggles projection state.

    Right button (3) toggles "right", anything else toggles "left".
    """
    event.Skip()
    row, _ = self.HitTest(event.Position)
    if row != -1:
        item = self.get(row)
        col = self.getColumn(event.Position)
        if col == self.getColIndex(State):
            fitID = self.mainFrame.getActiveFit()
            sFit = Fit.getInstance()
            sFit.toggleProjected(fitID, item, "right" if event.Button == 3 else "left")
            wx.PostEvent(self.mainFrame, GE.FitChanged(fitID=fitID))
def scheduleMenu(self, event):
    """Queue the context menu unless the click landed on the State column."""
    event.Skip()
    clickedCol = self.getColumn(event.Position)
    if clickedCol == self.getColIndex(State):
        return
    wx.CallAfter(self.spawnMenu)
def spawnMenu(self):
    """Build and pop up the context menu for the current selection.

    The menu context is a tuple of (sourceContext, itemContext) pairs that
    depends on the selected thing's type; a generic ("projected",) entry is
    always appended so the wormhole-effect items show even with no selection.
    """
    fitID = self.mainFrame.getActiveFit()
    if fitID is None:
        return

    sel = self.GetFirstSelected()
    context = ()
    item = self.get(sel)
    if item is not None:
        sMkt = Market.getInstance()
        if isinstance(item, es_Drone):
            srcContext = "projectedDrone"
            itemContext = sMkt.getCategoryByItem(item.item).name
            context = ((srcContext, itemContext),)
        elif isinstance(item, es_Fighter):
            srcContext = "projectedFighter"
            itemContext = sMkt.getCategoryByItem(item.item).name
            context = ((srcContext, itemContext),)
        elif isinstance(item, es_Module):
            modSrcContext = "projectedModule"
            modItemContext = sMkt.getCategoryByItem(item.item).name
            modFullContext = (modSrcContext, modItemContext)
            if item.charge is not None:
                # a loaded charge contributes its own context entry
                chgSrcContext = "projectedCharge"
                chgItemContext = sMkt.getCategoryByItem(item.charge).name
                chgFullContext = (chgSrcContext, chgItemContext)
                context = (modFullContext, chgFullContext)
            else:
                context = (modFullContext,)
        else:
            # anything else in the list is a projected fit
            fitSrcContext = "projectedFit"
            fitItemContext = item.name
            context = ((fitSrcContext, fitItemContext),)

    context += (("projected",),)
    menu = ContextMenu.getMenu((item,) if item is not None else [], *context)

    if menu is not None:
        self.PopupMenu(menu)
def remove(self, event):
    """Remove the projected item under the mouse, unless the State column was hit."""
    row, _ = self.HitTest(event.Position)
    if row == -1:
        return
    if self.getColumn(event.Position) == self.getColIndex(State):
        return
    thing = self.get(row)
    if thing:  # thing doesn't exist if it's the dummy value
        fitID = self.mainFrame.getActiveFit()
        self.mainFrame.command.Submit(cmd.GuiRemoveProjectedCommand(fitID, thing))
| gpl-3.0 |
openhatch/new-mini-tasks | vendor/packages/Django/django/contrib/admin/actions.py | 101 | 3193 | """
Built-in, globally-available admin actions.
"""
from django.core.exceptions import PermissionDenied
from django.contrib.admin import helpers
from django.contrib.admin.util import get_deleted_objects, model_ngettext
from django.db import router
from django.template.response import TemplateResponse
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy, ugettext as _
def delete_selected(modeladmin, request, queryset):
    """
    Default action which deletes the selected objects.

    This action first displays a confirmation page which shows all the
    deletable objects, or, if the user lacks permission on one of the related
    children (foreignkeys), a "permission denied" message.

    Next, it deletes all selected objects and redirects back to the change list.
    """
    opts = modeladmin.model._meta
    app_label = opts.app_label

    # Check that the user has delete permission for the actual model
    if not modeladmin.has_delete_permission(request):
        raise PermissionDenied

    using = router.db_for_write(modeladmin.model)

    # Populate deletable_objects, a data structure of all related objects that
    # will also be deleted.
    deletable_objects, perms_needed, protected = get_deleted_objects(
        queryset, opts, request.user, modeladmin.admin_site, using)

    # The user has already confirmed the deletion.
    # Do the deletion and return a None to display the change list view again.
    if request.POST.get('post'):
        if perms_needed:
            raise PermissionDenied
        n = queryset.count()
        if n:
            # log each deletion individually before the bulk delete
            for obj in queryset:
                obj_display = force_text(obj)
                modeladmin.log_deletion(request, obj, obj_display)
            queryset.delete()
            modeladmin.message_user(request, _("Successfully deleted %(count)d %(items)s.") % {
                "count": n, "items": model_ngettext(modeladmin.opts, n)
            })
        # Return None to display the change list page again.
        return None

    if len(queryset) == 1:
        objects_name = force_text(opts.verbose_name)
    else:
        objects_name = force_text(opts.verbose_name_plural)

    if perms_needed or protected:
        title = _("Cannot delete %(name)s") % {"name": objects_name}
    else:
        title = _("Are you sure?")

    context = {
        "title": title,
        "objects_name": objects_name,
        "deletable_objects": [deletable_objects],
        'queryset': queryset,
        "perms_lacking": perms_needed,
        "protected": protected,
        "opts": opts,
        "app_label": app_label,
        'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME,
    }

    # Display the confirmation page
    return TemplateResponse(request, modeladmin.delete_selected_confirmation_template or [
        "admin/%s/%s/delete_selected_confirmation.html" % (app_label, opts.object_name.lower()),
        "admin/%s/delete_selected_confirmation.html" % app_label,
        "admin/delete_selected_confirmation.html"
    ], context, current_app=modeladmin.admin_site.name)

delete_selected.short_description = ugettext_lazy("Delete selected %(verbose_name_plural)s")
| apache-2.0 |
fitoria/askbot-devel | askbot/management/commands/askbot_update_index.py | 9 | 3711 | import sys
from optparse import make_option
from django.core.management import get_commands, load_command_class
from django.utils.translation import activate as activate_language
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
try:
from haystack.management.commands.update_index import Command as UpdateCommand
haystack_option_list = [option for option in UpdateCommand.base_options if option.get_opt_string() != '--verbosity']
except ImportError:
haystack_option_list = []
class Command(BaseCommand):
    """Rebuild the haystack search index for a single language.

    Thin wrapper around haystack's ``update_index`` management command that
    activates the requested language and routes the update to the matching
    language-specific search connection (``default_<lang>``).
    """
    help = "Completely rebuilds the search index by removing the old data and then updating."
    base_options = [
        make_option("-l", "--language", action="store", type="string", dest="language",
                    help='Language to use, in language code format'),
    ]
    option_list = list(BaseCommand.option_list) + haystack_option_list + base_options

    def handle(self, *args, **options):
        """Activate the target language and delegate to haystack's update_index."""
        lang_code = options.get('language', settings.LANGUAGE_CODE.lower())
        activate_language(lang_code)
        # haystack connections are named per language, e.g. "default_en"
        options['using'] = ['default_%s' % lang_code[:2]]
        klass = self._get_command_class('update_index')
        klass.handle(*args, **options)

    def _get_command_class(self, name):
        """Resolve a management command by name, loading it if necessary.

        Raises CommandError when no such command is registered.
        """
        try:
            app_name = get_commands()[name]
        except KeyError:
            raise CommandError("Unknown command: %r" % name)
        if isinstance(app_name, BaseCommand):
            # If the command is already loaded, use it directly.
            return app_name
        return load_command_class(app_name, name)

    def execute(self, *args, **options):
        """
        Try to execute this command, performing model validation if
        needed (as controlled by the attribute
        ``self.requires_model_validation``). If the command raises a
        ``CommandError``, intercept it and print it sensibly to
        stderr.
        """
        # Bug fix: `traceback` and `smart_str` were referenced below but never
        # imported anywhere in this module, so every error path previously
        # crashed with a NameError instead of reporting the real problem.
        import traceback
        from django.utils.encoding import smart_str

        show_traceback = options.get('traceback', False)
        # NOTE: a dead `if self.can_import_settings: try: pass / except
        # ImportError` block used to live here; `pass` can never raise, so the
        # handler was unreachable and has been removed.
        try:
            self.stdout = options.get('stdout', sys.stdout)
            self.stderr = options.get('stderr', sys.stderr)
            if self.requires_model_validation:
                self.validate()
            output = self.handle(*args, **options)
            if output:
                if self.output_transaction:
                    # This needs to be imported here, because it relies on
                    # settings.
                    from django.db import connections, DEFAULT_DB_ALIAS
                    connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
                    if connection.ops.start_transaction_sql():
                        self.stdout.write(self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()) + '\n')
                self.stdout.write(output)
                if self.output_transaction:
                    self.stdout.write('\n' + self.style.SQL_KEYWORD("COMMIT;") + '\n')
        except CommandError as e:
            if show_traceback:
                traceback.print_exc()
            else:
                self.stderr.write(smart_str(self.style.ERROR('Error: %s\n' % e)))
            sys.exit(1)
| gpl-3.0 |
ampax/edx-platform | common/djangoapps/external_auth/djangostore.py | 224 | 3356 | """A openid store using django cache"""
from openid.store.interface import OpenIDStore
from openid.store import nonce
from django.core.cache import cache
import logging
import time
DEFAULT_ASSOCIATIONS_TIMEOUT = 60
DEFAULT_NONCE_TIMEOUT = 600

ASSOCIATIONS_KEY_PREFIX = 'openid.provider.associations.'
NONCE_KEY_PREFIX = 'openid.provider.nonce.'

log = logging.getLogger('DjangoOpenIDStore')


def get_url_key(server_url):
    """Cache key under which the association dict for server_url lives."""
    return ASSOCIATIONS_KEY_PREFIX + server_url


def get_nonce_key(server_url, timestamp, salt):
    """Cache key marking a single (server, timestamp, salt) nonce as used."""
    return '{prefix}{url}.{ts}.{salt}'.format(
        prefix=NONCE_KEY_PREFIX, url=server_url, ts=timestamp, salt=salt)
class DjangoOpenIDStore(OpenIDStore):
    """OpenID store backed by the configured django cache.

    Associations are kept per server url as a dict (handle -> Association)
    under one cache key; each used nonce is a separate cache key whose mere
    presence marks it as consumed. Cleanup relies on cache timeouts.
    """

    def __init__(self):
        log.info('DjangoStore cache:' + str(cache.__class__))

    def storeAssociation(self, server_url, assoc):
        """Add or replace an association in the per-server dict."""
        key = get_url_key(server_url)
        log.info('storeAssociation {0}'.format(key))
        associations = cache.get(key, {})
        associations[assoc.handle] = assoc
        cache.set(key, associations, DEFAULT_ASSOCIATIONS_TIMEOUT)

    def getAssociation(self, server_url, handle=None):
        """Return a valid association for server_url, or None.

        Without a handle, the association with the longest remaining
        lifetime is returned; an expired association fetched by handle is
        purged from the cache.
        """
        key = get_url_key(server_url)
        log.info('getAssociation {0}'.format(key))
        associations = cache.get(key, {})
        assoc = None
        if handle is None:
            # Bug fix: iterate the dict *values* (iterating the dict itself
            # yields handle strings), pass the lambda as the sort `key=`
            # (positionally it was treated as a py2 cmp function), and index
            # the sorted list (the old code indexed the .sort method).
            valid_assocs = [a for a in associations.values()
                            if a.getExpiresIn() > 0]
            if valid_assocs:
                valid_assocs.sort(key=lambda a: a.getExpiresIn(), reverse=True)
                assoc = valid_assocs[0]
        else:
            assoc = associations.get(handle)
            # check expiration and remove if it has expired
            if assoc and assoc.getExpiresIn() <= 0:
                associations.pop(handle)
                cache.set(key, associations, DEFAULT_ASSOCIATIONS_TIMEOUT)
                assoc = None
        return assoc

    def removeAssociation(self, server_url, handle):
        """Remove one association (or all of them when handle is None)."""
        key = get_url_key(server_url)
        log.info('removeAssociation {0}'.format(key))
        associations = cache.get(key, {})
        removed = False
        if associations:
            if handle is None:
                cache.delete(key)
                removed = True
            else:
                assoc = associations.pop(handle, None)
                if assoc:
                    cache.set(key, associations, DEFAULT_ASSOCIATIONS_TIMEOUT)
                    removed = True
        return removed

    def useNonce(self, server_url, timestamp, salt):
        """Return True exactly once per nonce (i.e. when it has not been seen).

        Bug fix: the OpenIDStore interface defines the return value as
        "nonce is valid / usable"; the previous code returned `found`,
        i.e. True on replay and False on first use — inverted.
        """
        key = get_nonce_key(server_url, timestamp, salt)
        log.info('useNonce {0}'.format(key))
        if abs(timestamp - time.time()) > nonce.SKEW:
            # outside the allowed clock skew: reject outright
            return False
        if cache.get(key) is None:
            # NOTE(review): get-then-set is not atomic; cache.add() would
            # close the race window between concurrent consumers.
            cache.set(key, '-', DEFAULT_NONCE_TIMEOUT)
            return True
        return False

    def cleanupNonces(self):
        # not necessary, keys will timeout
        return 0

    def cleanupAssociations(self):
        # not necessary, keys will timeout
        return 0
| agpl-3.0 |
devoid/nova | nova/api/openstack/compute/plugins/v3/server_diagnostics.py | 3 | 1916 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from nova.api.openstack import extensions
from nova import compute
from nova import exception
ALIAS = "os-server-diagnostics"
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
class ServerDiagnosticsController(object):
    """Controller exposing hypervisor diagnostics for a single server."""

    @extensions.expected_errors(404)
    def index(self, req, server_id):
        """Return diagnostics for server_id; 404 when the instance is gone."""
        ctx = req.environ["nova.context"]
        authorize(ctx)
        api = compute.API()
        try:
            instance = api.get(ctx, server_id)
        except exception.InstanceNotFound as err:
            raise webob.exc.HTTPNotFound(err.format_message())
        return api.get_diagnostics(ctx, instance)
class ServerDiagnostics(extensions.V3APIExtensionBase):
    """Allow Admins to view server diagnostics through server action."""

    name = "ServerDiagnostics"
    alias = ALIAS
    version = 1

    def get_resources(self):
        # mount os-server-diagnostics as a sub-resource of /servers
        parent = {'member_name': 'server', 'collection_name': 'servers'}
        resource = extensions.ResourceExtension(
            ALIAS, ServerDiagnosticsController(), parent=parent)
        return [resource]

    def get_controller_extensions(self):
        # this extension contributes no controller extensions
        return []
| apache-2.0 |
chiragjogi/odoo | openerp/addons/base/ir/ir_attachment.py | 183 | 16487 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hashlib
import itertools
import logging
import os
import re
from openerp import tools
from openerp.tools.translate import _
from openerp.exceptions import AccessError
from openerp.osv import fields,osv
from openerp import SUPERUSER_ID
from openerp.osv.orm import except_orm
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class ir_attachment(osv.osv):
    """Attachments are used to link binary files or url to any openerp document.

    External attachment storage
    ---------------------------
    The 'data' function field (_data_get,data_set) is implemented using
    _file_read, _file_write and _file_delete which can be overridden to
    implement other storage engines, such methods should check for other
    location pseudo uri (example: hdfs://hadoopserver)

    The default implementation is the file:dirname location that stores files
    on the local filesystem using name based on their sha1 hash
    """
    _order = 'id desc'

    def _name_get_resname(self, cr, uid, ids, object, method, context):
        """Function field getter for 'res_name': display name of the record
        the attachment is linked to, shortened with an ellipsis if needed."""
        data = {}
        for attachment in self.browse(cr, uid, ids, context=context):
            model_object = attachment.res_model
            res_id = attachment.res_id
            if model_object and res_id:
                model_pool = self.pool[model_object]
                res = model_pool.name_get(cr,uid,[res_id],context)
                res_name = res and res[0][1] or None
                if res_name:
                    field = self._columns.get('res_name',False)
                    if field and len(res_name) > field.size:
                        res_name = res_name[:30] + '...'
                data[attachment.id] = res_name or False
            else:
                data[attachment.id] = False
        return data

    def _storage(self, cr, uid, context=None):
        # configured storage backend: 'file' (on-disk filestore) or 'db'
        return self.pool['ir.config_parameter'].get_param(cr, SUPERUSER_ID, 'ir_attachment.location', 'file')

    def _filestore(self, cr, uid, context=None):
        # root directory of the on-disk filestore for this database
        return tools.config.filestore(cr.dbname)

    def force_storage(self, cr, uid, context=None):
        """Force all attachments to be stored in the currently configured storage"""
        if not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'):
            raise AccessError(_('Only administrators can execute this action.'))

        location = self._storage(cr, uid, context)
        # select the attachments currently stored in the *other* backend
        domain = {
            'db': [('store_fname', '!=', False)],
            'file': [('db_datas', '!=', False)],
        }[location]
        ids = self.search(cr, uid, domain, context=context)
        for attach in self.browse(cr, uid, ids, context=context):
            # rewriting 'datas' routes the payload through the current backend
            attach.write({'datas': attach.datas})
        return True

    # 'data' field implementation
    def _full_path(self, cr, uid, path):
        # sanitize path: drop dots and leading/trailing separators so the
        # joined result stays inside the filestore root
        path = re.sub('[.]', '', path)
        path = path.strip('/\\')
        return os.path.join(self._filestore(cr, uid), path)

    def _get_path(self, cr, uid, bin_data):
        """Return (relative fname, absolute path) for sha1-addressed content."""
        sha = hashlib.sha1(bin_data).hexdigest()

        # retro compatibility
        fname = sha[:3] + '/' + sha
        full_path = self._full_path(cr, uid, fname)
        if os.path.isfile(full_path):
            return fname, full_path        # keep existing path

        # scatter files across 256 dirs
        # we use '/' in the db (even on windows)
        fname = sha[:2] + '/' + sha
        full_path = self._full_path(cr, uid, fname)
        dirname = os.path.dirname(full_path)
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        return fname, full_path

    def _file_read(self, cr, uid, fname, bin_size=False):
        """Read a filestore file; returns base64 data (or only its size when
        bin_size is set). Returns '' when the file is missing/unreadable."""
        full_path = self._full_path(cr, uid, fname)
        r = ''
        try:
            if bin_size:
                r = os.path.getsize(full_path)
            else:
                r = open(full_path,'rb').read().encode('base64')
        except IOError:
            _logger.exception("_read_file reading %s", full_path)
        return r

    def _file_write(self, cr, uid, value):
        """Write base64 `value` into the filestore; returns the relative fname.

        Content-addressed: if the file already exists nothing is rewritten."""
        bin_value = value.decode('base64')
        fname, full_path = self._get_path(cr, uid, bin_value)
        if not os.path.exists(full_path):
            try:
                with open(full_path, 'wb') as fp:
                    fp.write(bin_value)
            except IOError:
                _logger.exception("_file_write writing %s", full_path)
        return fname

    def _file_delete(self, cr, uid, fname):
        """Garbage-collect a filestore file once no attachment references it."""
        # using SQL to include files hidden through unlink or due to record rules
        cr.execute("SELECT COUNT(*) FROM ir_attachment WHERE store_fname = %s", (fname,))
        count = cr.fetchone()[0]
        full_path = self._full_path(cr, uid, fname)
        if not count and os.path.exists(full_path):
            try:
                os.unlink(full_path)
            except OSError:
                _logger.exception("_file_delete could not unlink %s", full_path)
            except IOError:
                # Harmless and needed for race conditions
                _logger.exception("_file_delete could not unlink %s", full_path)

    def _data_get(self, cr, uid, ids, name, arg, context=None):
        # 'datas' getter: filestore when store_fname is set, else db_datas
        if context is None:
            context = {}
        result = {}
        bin_size = context.get('bin_size')
        for attach in self.browse(cr, uid, ids, context=context):
            if attach.store_fname:
                result[attach.id] = self._file_read(cr, uid, attach.store_fname, bin_size)
            else:
                result[attach.id] = attach.db_datas
        return result

    def _data_set(self, cr, uid, id, name, value, arg, context=None):
        # 'datas' setter: store in the configured backend, then GC the
        # previously referenced filestore file (if any)
        # We dont handle setting data to null
        if not value:
            return True
        if context is None:
            context = {}
        location = self._storage(cr, uid, context)
        file_size = len(value.decode('base64'))
        attach = self.browse(cr, uid, id, context=context)
        fname_to_delete = attach.store_fname
        if location != 'db':
            fname = self._file_write(cr, uid, value)
            # SUPERUSER_ID as probably don't have write access, trigger during create
            super(ir_attachment, self).write(cr, SUPERUSER_ID, [id], {'store_fname': fname, 'file_size': file_size, 'db_datas': False}, context=context)
        else:
            super(ir_attachment, self).write(cr, SUPERUSER_ID, [id], {'db_datas': value, 'file_size': file_size, 'store_fname': False}, context=context)

        # After de-referencing the file in the database, check whether we need
        # to garbage-collect it on the filesystem
        if fname_to_delete:
            self._file_delete(cr, uid, fname_to_delete)
        return True

    _name = 'ir.attachment'
    _columns = {
        'name': fields.char('Attachment Name', required=True),
        'datas_fname': fields.char('File Name'),
        'description': fields.text('Description'),
        'res_name': fields.function(_name_get_resname, type='char', string='Resource Name', store=True),
        'res_model': fields.char('Resource Model', readonly=True, help="The database object this attachment will be attached to"),
        'res_id': fields.integer('Resource ID', readonly=True, help="The record id this is attached to"),
        'create_date': fields.datetime('Date Created', readonly=True),
        'create_uid': fields.many2one('res.users', 'Owner', readonly=True),
        'company_id': fields.many2one('res.company', 'Company', change_default=True),
        'type': fields.selection( [ ('url','URL'), ('binary','Binary'), ],
                'Type', help="Binary File or URL", required=True, change_default=True),
        'url': fields.char('Url', size=1024),
        # al: We keep shitty field names for backward compatibility with document
        'datas': fields.function(_data_get, fnct_inv=_data_set, string='File Content', type="binary", nodrop=True),
        'store_fname': fields.char('Stored Filename'),
        'db_datas': fields.binary('Database Data'),
        'file_size': fields.integer('File Size'),
    }

    _defaults = {
        'type': 'binary',
        'file_size': 0,
        'company_id': lambda s,cr,uid,c: s.pool.get('res.company')._company_default_get(cr, uid, 'ir.attachment', context=c),
    }

    def _auto_init(self, cr, context=None):
        # create the (res_model, res_id) index on first installation
        super(ir_attachment, self)._auto_init(cr, context)
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('ir_attachment_res_idx',))
        if not cr.fetchone():
            cr.execute('CREATE INDEX ir_attachment_res_idx ON ir_attachment (res_model, res_id)')
            cr.commit()

    def check(self, cr, uid, ids, mode, context=None, values=None):
        """Restricts the access to an ir.attachment, according to referred model
        In the 'document' module, it is overriden to relax this hard rule, since
        more complex ones apply there.
        """
        res_ids = {}
        require_employee = False
        if ids:
            if isinstance(ids, (int, long)):
                ids = [ids]
            cr.execute('SELECT DISTINCT res_model, res_id, create_uid FROM ir_attachment WHERE id = ANY (%s)', (ids,))
            for rmod, rid, create_uid in cr.fetchall():
                if not (rmod and rid):
                    # attachment not linked to a record: only its owner may
                    # touch it, everybody else must at least be an employee
                    if create_uid != uid:
                        require_employee = True
                    continue
                res_ids.setdefault(rmod,set()).add(rid)
        if values:
            if values.get('res_model') and values.get('res_id'):
                res_ids.setdefault(values['res_model'],set()).add(values['res_id'])

        ima = self.pool.get('ir.model.access')
        for model, mids in res_ids.items():
            # ignore attachments that are not attached to a resource anymore when checking access rights
            # (resource was deleted but attachment was not)
            if not self.pool.get(model):
                require_employee = True
                continue
            existing_ids = self.pool[model].exists(cr, uid, mids)
            if len(existing_ids) != len(mids):
                require_employee = True
            # access to the attachment follows access to the linked record
            ima.check(cr, uid, model, mode)
            self.pool[model].check_access_rule(cr, uid, existing_ids, mode, context=context)
        if require_employee:
            if not uid == SUPERUSER_ID and not self.pool['res.users'].has_group(cr, uid, 'base.group_user'):
                raise except_orm(_('Access Denied'), _("Sorry, you are not allowed to access this document."))

    def _search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
        # post-filter the ids so attachments whose linked record the user
        # cannot read are hidden from search results
        ids = super(ir_attachment, self)._search(cr, uid, args, offset=offset,
                                                 limit=limit, order=order,
                                                 context=context, count=False,
                                                 access_rights_uid=access_rights_uid)
        if not ids:
            if count:
                return 0
            return []

        # Work with a set, as list.remove() is prohibitive for large lists of documents
        # (takes 20+ seconds on a db with 100k docs during search_count()!)
        orig_ids = ids
        ids = set(ids)

        # For attachments, the permissions of the document they are attached to
        # apply, so we must remove attachments for which the user cannot access
        # the linked document.
        # Use pure SQL rather than read() as it is about 50% faster for large dbs (100k+ docs),
        # and the permissions are checked in super() and below anyway.
        cr.execute("""SELECT id, res_model, res_id FROM ir_attachment WHERE id = ANY(%s)""", (list(ids),))
        targets = cr.dictfetchall()
        model_attachments = {}
        for target_dict in targets:
            if not target_dict['res_model']:
                continue
            # model_attachments = { 'model': { 'res_id': [id1,id2] } }
            model_attachments.setdefault(target_dict['res_model'],{}).setdefault(target_dict['res_id'] or 0, set()).add(target_dict['id'])

        # To avoid multiple queries for each attachment found, checks are
        # performed in batch as much as possible.
        ima = self.pool.get('ir.model.access')
        for model, targets in model_attachments.iteritems():
            if model not in self.pool:
                continue
            if not ima.check(cr, uid, model, 'read', False):
                # remove all corresponding attachment ids
                for attach_id in itertools.chain(*targets.values()):
                    ids.remove(attach_id)
                continue # skip ir.rule processing, these ones are out already

            # filter ids according to what access rules permit
            target_ids = targets.keys()
            allowed_ids = [0] + self.pool[model].search(cr, uid, [('id', 'in', target_ids)], context=context)
            disallowed_ids = set(target_ids).difference(allowed_ids)
            for res_id in disallowed_ids:
                for attach_id in targets[res_id]:
                    ids.remove(attach_id)

        # sort result according to the original sort ordering
        result = [id for id in orig_ids if id in ids]
        return len(result) if count else list(result)

    def read(self, cr, uid, ids, fields_to_read=None, context=None, load='_classic_read'):
        # enforce linked-record access before reading
        if isinstance(ids, (int, long)):
            ids = [ids]
        self.check(cr, uid, ids, 'read', context=context)
        return super(ir_attachment, self).read(cr, uid, ids, fields_to_read, context=context, load=load)

    def write(self, cr, uid, ids, vals, context=None):
        if isinstance(ids, (int, long)):
            ids = [ids]
        self.check(cr, uid, ids, 'write', context=context, values=vals)
        # file_size is computed from the payload, never user-writable
        if 'file_size' in vals:
            del vals['file_size']
        return super(ir_attachment, self).write(cr, uid, ids, vals, context)

    def copy(self, cr, uid, id, default=None, context=None):
        # copying requires write access on the linked record
        self.check(cr, uid, [id], 'write', context=context)
        return super(ir_attachment, self).copy(cr, uid, id, default, context)

    def unlink(self, cr, uid, ids, context=None):
        if isinstance(ids, (int, long)):
            ids = [ids]
        self.check(cr, uid, ids, 'unlink', context=context)

        # First delete in the database, *then* in the filesystem if the
        # database allowed it. Helps avoid errors when concurrent transactions
        # are deleting the same file, and some of the transactions are
        # rolled back by PostgreSQL (due to concurrent updates detection).
        to_delete = [a.store_fname
                     for a in self.browse(cr, uid, ids, context=context)
                     if a.store_fname]
        res = super(ir_attachment, self).unlink(cr, uid, ids, context)
        for file_path in to_delete:
            self._file_delete(cr, uid, file_path)

        return res

    def create(self, cr, uid, values, context=None):
        self.check(cr, uid, [], mode='write', context=context, values=values)
        # file_size is computed from the payload, never user-writable
        if 'file_size' in values:
            del values['file_size']
        return super(ir_attachment, self).create(cr, uid, values, context)

    def action_get(self, cr, uid, context=None):
        # window action used by the attachments button in form views
        return self.pool.get('ir.actions.act_window').for_xml_id(
            cr, uid, 'base', 'action_attachment', context=context)

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tedsunnyday/SE-Server | server/lib/jinja2/compiler.py | 121 | 61899 | # -*- coding: utf-8 -*-
"""
jinja2.compiler
~~~~~~~~~~~~~~~
Compiles nodes into python code.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from cStringIO import StringIO
from itertools import chain
from copy import deepcopy
from jinja2 import nodes
from jinja2.nodes import EvalContext
from jinja2.visitor import NodeVisitor
from jinja2.exceptions import TemplateAssertionError
from jinja2.utils import Markup, concat, escape, is_python_keyword, next
# mapping of Compare operand types to the python operator they render as
operators = {
    'eq': '==',
    'ne': '!=',
    'gt': '>',
    'gteq': '>=',
    'lt': '<',
    'lteq': '<=',
    'in': 'in',
    'notin': 'not in'
}

# feature probe: does this interpreter support conditional expressions
# (`x if c else y`)?  Python < 2.5 raises a SyntaxError on compile.
try:
    exec '(0 if 0 else 0)'
except SyntaxError:
    have_condexpr = False
else:
    have_condexpr = True


# what method to iterate over items do we want to use for dict iteration
# in generated code? on 2.x let's go with iteritems, on 3.x with items
if hasattr(dict, 'iteritems'):
    dict_item_iter = 'iteritems'
else:
    dict_item_iter = 'items'


# does if 0: dummy(x) get us x into the scope?
def unoptimize_before_dead_code():
    x = 42
    def f():
        if 0: dummy(x)
    return f
# if `x` still ends up in f's closure the interpreter did NOT optimize the
# dead branch away, so the compiler must not rely on dead-code elimination
unoptimize_before_dead_code = bool(unoptimize_before_dead_code().func_closure)
def generate(node, environment, name, filename, stream=None,
             defer_init=False):
    """Generate the python source for a node tree."""
    if not isinstance(node, nodes.Template):
        raise TypeError('Can\'t compile non template nodes')
    gen = CodeGenerator(environment, name, filename, stream, defer_init)
    gen.visit(node)
    # without an explicit stream the generated source is returned as a string
    if stream is not None:
        return None
    return gen.stream.getvalue()
def has_safe_repr(value):
    """Does the node have a safe representation?"""
    if value is None or value is NotImplemented or value is Ellipsis:
        return True
    scalar_types = (bool, int, long, float, complex, basestring,
                    xrange, Markup)
    if isinstance(value, scalar_types):
        return True
    if isinstance(value, (tuple, list, set, frozenset)):
        # every contained item must itself be safe
        return all(has_safe_repr(item) for item in value)
    if isinstance(value, dict):
        # both keys and values must be safe
        return all(has_safe_repr(k) and has_safe_repr(v)
                   for k, v in value.iteritems())
    return False
def find_undeclared(nodes, names):
    """Check if the names passed are accessed undeclared.  The return value
    is a set of all the undeclared names from the sequence of names found.
    """
    checker = UndeclaredNameVisitor(names)
    try:
        for item in nodes:
            checker.visit(item)
    except VisitorExit:
        # the visitor aborts the walk once every watched name has been seen
        pass
    return checker.undeclared
class Identifiers(object):
    """Tracks the status of identifiers in frames."""

    def __init__(self):
        # names known to be declared (outer frames or frame specials)
        self.declared = set()
        # undeclared variables from outer scopes
        self.outer_undeclared = set()
        # names accessed without an explicit declaration by this or any
        # outer scope; a name may be both declared and undeclared
        self.undeclared = set()
        # names declared locally
        self.declared_locally = set()
        # names declared by parameters
        self.declared_parameter = set()

    def add_special(self, name):
        """Register a special name like `loop`."""
        self.undeclared.discard(name)
        self.declared.add(name)

    def is_declared(self, name):
        """Check if a name is declared in this or an outer scope."""
        return (name in self.declared_locally
                or name in self.declared_parameter
                or name in self.declared)

    def copy(self):
        return deepcopy(self)
class Frame(object):
    """Holds compile time information for us."""

    def __init__(self, eval_ctx, parent=None):
        self.eval_ctx = eval_ctx
        self.identifiers = Identifiers()

        # a toplevel frame is the root + soft frames such as if conditions.
        self.toplevel = False

        # the root frame is basically just the outermost frame, so no if
        # conditions. This information is used to optimize inheritance
        # situations.
        self.rootlevel = False

        # in some dynamic inheritance situations the compiler needs to add
        # write tests around output statements.
        self.require_output_check = parent and parent.require_output_check

        # inside some tags we are using a buffer rather than yield statements.
        # this for example affects {% filter %} or {% macro %}. If a frame
        # is buffered this variable points to the name of the list used as
        # buffer.
        self.buffer = None

        # the name of the block we're in, otherwise None.
        self.block = parent and parent.block or None

        # a set of actually assigned names
        self.assigned_names = set()

        # the parent of this frame
        self.parent = parent

        if parent is not None:
            # inherit the parent's declarations; anything the parent accessed
            # undeclared (and we don't declare) stays undeclared from outside
            self.identifiers.declared.update(
                parent.identifiers.declared |
                parent.identifiers.declared_parameter |
                parent.assigned_names
            )
            self.identifiers.outer_undeclared.update(
                parent.identifiers.undeclared -
                self.identifiers.declared
            )
            self.buffer = parent.buffer

    def copy(self):
        """Create a copy of the current one."""
        # shallow copy, but identifiers get their own attribute dict so the
        # copy can track declarations independently
        rv = object.__new__(self.__class__)
        rv.__dict__.update(self.__dict__)
        rv.identifiers = object.__new__(self.identifiers.__class__)
        rv.identifiers.__dict__.update(self.identifiers.__dict__)
        return rv

    def inspect(self, nodes):
        """Walk the node and check for identifiers. If the scope is hard (eg:
        enforce on a python level) overrides from outer scopes are tracked
        differently.
        """
        visitor = FrameIdentifierVisitor(self.identifiers)
        for node in nodes:
            visitor.visit(node)

    def find_shadowed(self, extra=()):
        """Find all the shadowed names. extra is an iterable of variables
        that may be defined with `add_special` which may occur scoped.
        """
        i = self.identifiers
        return (i.declared | i.outer_undeclared) & \
               (i.declared_locally | i.declared_parameter) | \
               set(x for x in extra if i.is_declared(x))

    def inner(self):
        """Return an inner frame."""
        return Frame(self.eval_ctx, self)

    def soft(self):
        """Return a soft frame. A soft frame may not be modified as
        standalone thing as it shares the resources with the frame it
        was created of, but it's not a rootlevel frame any longer.
        """
        rv = self.copy()
        rv.rootlevel = False
        return rv

    __copy__ = copy
class VisitorExit(RuntimeError):
    """Raised by the `UndeclaredNameVisitor` to abort a traversal as
    soon as every name of interest has been found.
    """
class DependencyFinderVisitor(NodeVisitor):
    """Collects the names of all filters and tests used below a node.
    Traversal does not descend into ``{% block %}`` tags, since blocks
    are compiled as separate functions with their own dependencies.
    """
    def __init__(self):
        self.filters = set()
        self.tests = set()
    def visit_Filter(self, node):
        # record this filter, then keep walking the child nodes
        self.filters.add(node.name)
        self.generic_visit(node)
    def visit_Test(self, node):
        # record this test, then keep walking the child nodes
        self.tests.add(node.name)
        self.generic_visit(node)
    def visit_Block(self, node):
        """Stop visiting at blocks."""
class UndeclaredNameVisitor(NodeVisitor):
    """Checks whether any of a given set of names is accessed without
    being declared first.  Unlike the frame visitor this one crosses
    closure frames; only ``{% block %}`` tags stop the traversal.
    """
    def __init__(self, names):
        self.names = set(names)
        self.undeclared = set()
    def visit_Name(self, node):
        if node.ctx != 'load' or node.name not in self.names:
            # any store (or an unwatched name) counts as a declaration,
            # so stop watching that name
            self.names.discard(node.name)
            return
        self.undeclared.add(node.name)
        if self.undeclared == self.names:
            # every watched name was seen undeclared -- nothing left to do
            raise VisitorExit()
    def visit_Block(self, node):
        """Stop visiting at blocks."""
class FrameIdentifierVisitor(NodeVisitor):
    """A visitor for `Frame.inspect`.  Classifies every name referenced
    below a node into the `Identifiers` sets (declared locally, declared
    as parameter, or undeclared).
    """
    def __init__(self, identifiers):
        # the `Identifiers` instance of the frame being inspected
        self.identifiers = identifiers
    def visit_Name(self, node):
        """All assignments to names go through this function."""
        if node.ctx == 'store':
            self.identifiers.declared_locally.add(node.name)
        elif node.ctx == 'param':
            self.identifiers.declared_parameter.add(node.name)
        elif node.ctx == 'load' and not \
             self.identifiers.is_declared(node.name):
            # read before any visible declaration: must be resolved from
            # the context at runtime
            self.identifiers.undeclared.add(node.name)
    def visit_If(self, node):
        # each branch is visited on a copy of the identifiers; undeclared
        # names of both branches are merged back, and names assigned in
        # only one branch are additionally treated as undeclared so they
        # get pulled from the context.
        self.visit(node.test)
        real_identifiers = self.identifiers
        old_names = real_identifiers.declared_locally | \
                    real_identifiers.declared_parameter
        def inner_visit(nodes):
            # visit one branch on a private copy; returns the names that
            # branch newly declared
            if not nodes:
                return set()
            self.identifiers = real_identifiers.copy()
            for subnode in nodes:
                self.visit(subnode)
            rv = self.identifiers.declared_locally - old_names
            # we have to remember the undeclared variables of this branch
            # because we will have to pull them.
            real_identifiers.undeclared.update(self.identifiers.undeclared)
            self.identifiers = real_identifiers
            return rv
        body = inner_visit(node.body)
        else_ = inner_visit(node.else_ or ())
        # the differences between the two branches are also pulled as
        # undeclared variables
        real_identifiers.undeclared.update(body.symmetric_difference(else_) -
                                           real_identifiers.declared)
        # remember those that are declared.
        real_identifiers.declared_locally.update(body | else_)
    def visit_Macro(self, node):
        # a macro declares its own name in the enclosing scope
        self.identifiers.declared_locally.add(node.name)
    def visit_Import(self, node):
        self.generic_visit(node)
        self.identifiers.declared_locally.add(node.target)
    def visit_FromImport(self, node):
        self.generic_visit(node)
        for name in node.names:
            if isinstance(name, tuple):
                # (name, alias) pair -- the alias is what gets declared
                self.identifiers.declared_locally.add(name[1])
            else:
                self.identifiers.declared_locally.add(name)
    def visit_Assign(self, node):
        """Visit assignments in the correct order."""
        # right hand side first so `{% set x = x + 1 %}` records `x` as
        # undeclared before the store declares it
        self.visit(node.node)
        self.visit(node.target)
    def visit_For(self, node):
        """Visiting stops at for blocks.  However the block sequence
        is visited as part of the outer scope.
        """
        self.visit(node.iter)
    def visit_CallBlock(self, node):
        # only the call expression belongs to the outer scope
        self.visit(node.call)
    def visit_FilterBlock(self, node):
        # only the filter expression belongs to the outer scope
        self.visit(node.filter)
    def visit_Scope(self, node):
        """Stop visiting at scopes."""
    def visit_Block(self, node):
        """Stop visiting at blocks."""
class CompilerExit(Exception):
    """Raised when the compiler cannot sensibly continue with the
    current block; the raising visitor guarantees that no further code
    for that block needs to be generated.
    """
class CodeGenerator(NodeVisitor):
    def __init__(self, environment, name, filename, stream=None,
                 defer_init=False):
        """Set up a code generator that writes the compiled module source
        for template `name` (loaded from `filename`) to `stream`; a new
        `StringIO` is created when no stream is given.  If `defer_init`
        is true the generated functions do not receive the environment
        as a default argument (see `visit_Template`).
        """
        if stream is None:
            stream = StringIO()
        self.environment = environment
        self.name = name
        self.filename = filename
        self.stream = stream
        self.created_block_context = False
        self.defer_init = defer_init
        # aliases for imports
        self.import_aliases = {}
        # a registry for all blocks. Because blocks are moved out
        # into the global python scope they are registered here
        self.blocks = {}
        # the number of extends statements so far
        self.extends_so_far = 0
        # some templates have a rootlevel extends. In this case we
        # can safely assume that we're a child template and do some
        # more optimizations.
        self.has_known_extends = False
        # the current line number of the generated source
        self.code_lineno = 1
        # registry of all filters and tests (global, not block local)
        self.tests = {}
        self.filters = {}
        # the debug information: (template line, generated line) pairs
        self.debug_info = []
        self._write_debug_info = None
        # the number of new lines before the next write()
        self._new_lines = 0
        # the line number of the last written statement
        self._last_line = 0
        # true if nothing was written so far.
        self._first_write = True
        # used by the `temporary_identifier` method to get new
        # unique, temporary identifier
        self._last_identifier = 0
        # the current indentation of the generated source
        self._indentation = 0
# -- Various compilation helpers
    def fail(self, msg, lineno):
        """Fail with a :exc:`TemplateAssertionError`.  `msg` is the error
        message and `lineno` the template line it refers to; template
        name and filename are taken from the generator itself.
        """
        raise TemplateAssertionError(msg, lineno, self.name, self.filename)
    def temporary_identifier(self):
        """Get a new unique identifier.  The counter only ever grows, so
        names of the form ``t_<n>`` never collide within one module.
        """
        self._last_identifier += 1
        return 't_%d' % self._last_identifier
    def buffer(self, frame):
        """Enable buffering for the frame from that point onwards.
        Output is collected into a fresh temporary list instead of being
        yielded; `return_buffer_contents` emits the counterpart.
        """
        frame.buffer = self.temporary_identifier()
        self.writeline('%s = []' % frame.buffer)
    def return_buffer_contents(self, frame):
        """Return the buffer contents of the frame.  With a volatile
        evaluation context the autoescape decision is deferred to a
        runtime check; otherwise the ``Markup`` wrapping is decided at
        compile time.
        """
        if frame.eval_ctx.volatile:
            self.writeline('if context.eval_ctx.autoescape:')
            self.indent()
            self.writeline('return Markup(concat(%s))' % frame.buffer)
            self.outdent()
            self.writeline('else:')
            self.indent()
            self.writeline('return concat(%s)' % frame.buffer)
            self.outdent()
        elif frame.eval_ctx.autoescape:
            self.writeline('return Markup(concat(%s))' % frame.buffer)
        else:
            self.writeline('return concat(%s)' % frame.buffer)
    def indent(self):
        """Indent by one.  Takes effect at the start of the next fresh
        line emitted by `write`.
        """
        self._indentation += 1
    def outdent(self, step=1):
        """Outdent by step."""
        self._indentation -= step
    def start_write(self, frame, node=None):
        """Yield or write into the frame buffer.  Must be paired with a
        matching `end_write` call that closes the append call when the
        frame is buffered.
        """
        if frame.buffer is None:
            self.writeline('yield ', node)
        else:
            self.writeline('%s.append(' % frame.buffer, node)
    def end_write(self, frame):
        """End the writing process started by `start_write`."""
        if frame.buffer is not None:
            # close the ``buffer.append(`` opened by `start_write`
            self.write(')')
    def simple_write(self, s, frame, node=None):
        """Simple shortcut for start_write + write + end_write."""
        self.start_write(frame, node)
        self.write(s)
        self.end_write(frame)
    def blockvisit(self, nodes, frame):
        """Visit a list of nodes as block in a frame.  If the current
        frame is no buffer a dummy ``if 0: yield None`` is written
        automatically so the generated function is always a generator.
        """
        if frame.buffer is None:
            self.writeline('if 0: yield None')
        else:
            # a buffered block may otherwise end up with an empty body
            self.writeline('pass')
        try:
            for node in nodes:
                self.visit(node, frame)
        except CompilerExit:
            # a visitor decided no further code for this block makes
            # sense (e.g. a root-level extends); stop quietly
            pass
    def write(self, x):
        """Write a string into the output stream."""
        if self._new_lines:
            # flush pending newlines, then the indentation of the fresh
            # line; nothing is emitted before the very first write
            if not self._first_write:
                self.stream.write('\n' * self._new_lines)
                self.code_lineno += self._new_lines
                if self._write_debug_info is not None:
                    # map the template line recorded by `newline` to the
                    # generated line we are about to start
                    self.debug_info.append((self._write_debug_info,
                                            self.code_lineno))
                    self._write_debug_info = None
            self._first_write = False
            self.stream.write(' ' * self._indentation)
            self._new_lines = 0
        self.stream.write(x)
    def writeline(self, x, node=None, extra=0):
        """Combination of newline and write."""
        self.newline(node, extra)
        self.write(x)
    def newline(self, node=None, extra=0):
        """Add one or more newlines before the next write."""
        self._new_lines = max(self._new_lines, 1 + extra)
        if node is not None and node.lineno != self._last_line:
            # remember the template line for the debug-info mapping that
            # `write` flushes together with the pending newlines
            self._write_debug_info = node.lineno
            self._last_line = node.lineno
    def signature(self, node, frame, extra_kwargs=None):
        """Writes a function call to the stream for the current node.
        A leading comma is added automatically.  The extra keyword
        arguments may not include python keywords otherwise a syntax
        error could occour.  The extra keyword arguments should be given
        as python dict.
        """
        # if any of the given keyword arguments is a python keyword
        # we have to make sure that no invalid call is created.
        kwarg_workaround = False
        for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()):
            if is_python_keyword(kwarg):
                kwarg_workaround = True
                break
        for arg in node.args:
            self.write(', ')
            self.visit(arg, frame)
        if not kwarg_workaround:
            # safe to emit plain ``name=value`` keyword syntax
            for kwarg in node.kwargs:
                self.write(', ')
                self.visit(kwarg, frame)
            if extra_kwargs is not None:
                for key, value in extra_kwargs.iteritems():
                    self.write(', %s=%s' % (key, value))
        if node.dyn_args:
            self.write(', *')
            self.visit(node.dyn_args, frame)
        if kwarg_workaround:
            # keyword-named kwargs must be passed through a **{...} (or
            # **dict(...) when dynamic kwargs are merged in as well)
            if node.dyn_kwargs is not None:
                self.write(', **dict({')
            else:
                self.write(', **{')
            for kwarg in node.kwargs:
                self.write('%r: ' % kwarg.key)
                self.visit(kwarg.value, frame)
                self.write(', ')
            if extra_kwargs is not None:
                for key, value in extra_kwargs.iteritems():
                    self.write('%r: %s, ' % (key, value))
            if node.dyn_kwargs is not None:
                self.write('}, **')
                self.visit(node.dyn_kwargs, frame)
                self.write(')')
            else:
                self.write('}')
        elif node.dyn_kwargs is not None:
            self.write(', **')
            self.visit(node.dyn_kwargs, frame)
    def pull_locals(self, frame):
        """Pull all undeclared (referenced-before-declared) identifiers
        into the local scope by resolving them from the context.
        """
        for name in frame.identifiers.undeclared:
            self.writeline('l_%s = context.resolve(%r)' % (name, name))
    def pull_dependencies(self, nodes):
        """Pull all the filter and test dependencies used below `nodes`
        into temporary local names; each dependency is assigned a
        temporary identifier only on first sight.
        """
        visitor = DependencyFinderVisitor()
        for node in nodes:
            visitor.visit(node)
        for dependency in 'filters', 'tests':
            mapping = getattr(self, dependency)
            for name in getattr(visitor, dependency):
                if name not in mapping:
                    mapping[name] = self.temporary_identifier()
                self.writeline('%s = environment.%s[%r]' %
                               (mapping[name], dependency, name))
    def unoptimize_scope(self, frame):
        """Disable Python optimizations for the frame by emitting a dummy
        call that mentions every declared name, forcing them to stay
        locals.
        """
        # XXX: this is not that nice but it has no real overhead. It
        # mainly works because python finds the locals before dead code
        # is removed. If that breaks we have to add a dummy function
        # that just accepts the arguments and does nothing.
        if frame.identifiers.declared:
            self.writeline('%sdummy(%s)' % (
                unoptimize_before_dead_code and 'if 0: ' or '',
                ', '.join('l_' + name for name in frame.identifiers.declared)
            ))
    def push_scope(self, frame, extra_vars=()):
        """This function returns all the shadowed variables in a dict
        in the form name: alias and will write the required assignments
        into the current scope.  No indentation takes place.
        This also predefines locally declared variables from the loop
        body because under some circumstances it may be the case that
        `extra_vars` is passed to `Frame.find_shadowed`.
        """
        aliases = {}
        for name in frame.find_shadowed(extra_vars):
            # save each shadowed ``l_<name>`` in a fresh temporary so
            # `pop_scope` can restore it afterwards
            aliases[name] = ident = self.temporary_identifier()
            self.writeline('%s = l_%s' % (ident, name))
        to_declare = set()
        for name in frame.identifiers.declared_locally:
            if name not in aliases:
                to_declare.add('l_' + name)
        if to_declare:
            self.writeline(' = '.join(to_declare) + ' = missing')
        return aliases
    def pop_scope(self, aliases, frame):
        """Restore all aliases and delete unused variables."""
        for name, alias in aliases.iteritems():
            self.writeline('l_%s = %s' % (name, alias))
        to_delete = set()
        for name in frame.identifiers.declared_locally:
            if name not in aliases:
                to_delete.add('l_' + name)
        if to_delete:
            # we cannot use the del statement here because enclosed
            # scopes can trigger a SyntaxError:
            #   a = 42; b = lambda: a; del a
            # so we reset the names to the `missing` sentinel instead
            self.writeline(' = '.join(to_delete) + ' = missing')
    def function_scoping(self, node, frame, children=None,
                         find_special=True):
        """In Jinja a few statements require the help of anonymous
        functions.  Those are currently macros and call blocks and in
        the future also recursive loops.  As there is currently
        technical limitation that doesn't allow reading and writing a
        variable in a scope where the initial value is coming from an
        outer scope, this function tries to fall back with a common
        error message.  Additionally the frame passed is modified so
        that the arguments are collected and callers are looked up.
        This will return the modified frame.
        """
        # we have to iterate twice over it, make sure that works
        if children is None:
            children = node.iter_child_nodes()
        children = list(children)
        func_frame = frame.inner()
        func_frame.inspect(children)
        # variables that are undeclared (accessed before declaration) and
        # declared locally *and* part of an outside scope raise a template
        # assertion error. Reason: we can't generate reasonable code from
        # it without aliasing all the variables.
        # this could be fixed in Python 3 where we have the nonlocal
        # keyword or if we switch to bytecode generation
        overriden_closure_vars = (
            func_frame.identifiers.undeclared &
            func_frame.identifiers.declared &
            (func_frame.identifiers.declared_locally |
             func_frame.identifiers.declared_parameter)
        )
        if overriden_closure_vars:
            self.fail('It\'s not possible to set and access variables '
                      'derived from an outer scope! (affects: %s)' %
                      ', '.join(sorted(overriden_closure_vars)), node.lineno)
        # remove variables from a closure from the frame's undeclared
        # identifiers.
        func_frame.identifiers.undeclared -= (
            func_frame.identifiers.undeclared &
            func_frame.identifiers.declared
        )
        # no special variables for this scope, abort early
        if not find_special:
            return func_frame
        # detect which of the implicit macro variables are actually used
        # so the generated function only receives what it needs
        func_frame.accesses_kwargs = False
        func_frame.accesses_varargs = False
        func_frame.accesses_caller = False
        func_frame.arguments = args = ['l_' + x.name for x in node.args]
        undeclared = find_undeclared(children, ('caller', 'kwargs', 'varargs'))
        if 'caller' in undeclared:
            func_frame.accesses_caller = True
            func_frame.identifiers.add_special('caller')
            args.append('l_caller')
        if 'kwargs' in undeclared:
            func_frame.accesses_kwargs = True
            func_frame.identifiers.add_special('kwargs')
            args.append('l_kwargs')
        if 'varargs' in undeclared:
            func_frame.accesses_varargs = True
            func_frame.identifiers.add_special('varargs')
            args.append('l_varargs')
        return func_frame
    def macro_body(self, node, frame, children=None):
        """Dump the function def of a macro or call block."""
        frame = self.function_scoping(node, frame, children)
        # macros are delayed, they never require output checks
        frame.require_output_check = False
        args = frame.arguments
        # XXX: this is an ugly fix for the loop nesting bug
        # (tests.test_old_bugs.test_loop_call_bug). This works around
        # a identifier nesting problem we have in general. It's just more
        # likely to happen in loops which is why we work around it. The
        # real solution would be "nonlocal" all the identifiers that are
        # leaking into a new python frame and might be used both unassigned
        # and assigned.
        if 'loop' in frame.identifiers.declared:
            args = args + ['l_loop=l_loop']
        self.writeline('def macro(%s):' % ', '.join(args), node)
        self.indent()
        # a macro body is always buffered and returned as one value
        self.buffer(frame)
        self.pull_locals(frame)
        self.blockvisit(node.body, frame)
        self.return_buffer_contents(frame)
        self.outdent()
        return frame
    def macro_def(self, node, frame):
        """Dump the macro definition for the def created by macro_body."""
        arg_tuple = ', '.join(repr(x.name) for x in node.args)
        name = getattr(node, 'name', None)
        if len(node.args) == 1:
            # a one-element tuple needs the trailing comma
            arg_tuple += ','
        self.write('Macro(environment, macro, %r, (%s), (' %
                   (name, arg_tuple))
        for arg in node.defaults:
            self.visit(arg, frame)
            self.write(', ')
        self.write('), %r, %r, %r)' % (
            bool(frame.accesses_kwargs),
            bool(frame.accesses_varargs),
            bool(frame.accesses_caller)
        ))
    def position(self, node):
        """Return a human readable position for the node."""
        rv = 'line %d' % node.lineno
        if self.name is not None:
            rv += ' in ' + repr(self.name)
        return rv
# -- Statement Visitors
    def visit_Template(self, node, frame=None):
        """Compile the whole template module: imports, the ``root``
        render function, one ``block_<name>`` function per block, the
        ``blocks`` registry and the debug info string.
        """
        assert frame is None, 'no root frame allowed'
        eval_ctx = EvalContext(self.environment, self.name)
        from jinja2.runtime import __all__ as exported
        self.writeline('from __future__ import division')
        self.writeline('from jinja2.runtime import ' + ', '.join(exported))
        if not unoptimize_before_dead_code:
            self.writeline('dummy = lambda *x: None')
        # if we want a deferred initialization we cannot move the
        # environment into a local name
        envenv = not self.defer_init and ', environment=environment' or ''
        # do we have an extends tag at all? If not, we can save some
        # overhead by just not processing any inheritance code.
        have_extends = node.find(nodes.Extends) is not None
        # find all blocks
        for block in node.find_all(nodes.Block):
            if block.name in self.blocks:
                self.fail('block %r defined twice' % block.name, block.lineno)
            self.blocks[block.name] = block
        # find all imports and import them
        for import_ in node.find_all(nodes.ImportedName):
            if import_.importname not in self.import_aliases:
                imp = import_.importname
                self.import_aliases[imp] = alias = self.temporary_identifier()
                if '.' in imp:
                    module, obj = imp.rsplit('.', 1)
                    self.writeline('from %s import %s as %s' %
                                   (module, obj, alias))
                else:
                    self.writeline('import %s as %s' % (imp, alias))
        # add the load name
        self.writeline('name = %r' % self.name)
        # generate the root render function.
        self.writeline('def root(context%s):' % envenv, extra=1)
        # process the root
        frame = Frame(eval_ctx)
        frame.inspect(node.body)
        frame.toplevel = frame.rootlevel = True
        frame.require_output_check = have_extends and not self.has_known_extends
        self.indent()
        if have_extends:
            self.writeline('parent_template = None')
        if 'self' in find_undeclared(node.body, ('self',)):
            frame.identifiers.add_special('self')
            self.writeline('l_self = TemplateReference(context)')
        self.pull_locals(frame)
        self.pull_dependencies(node.body)
        self.blockvisit(node.body, frame)
        self.outdent()
        # make sure that the parent root is called.
        if have_extends:
            if not self.has_known_extends:
                self.indent()
                self.writeline('if parent_template is not None:')
            self.indent()
            self.writeline('for event in parent_template.'
                           'root_render_func(context):')
            self.indent()
            self.writeline('yield event')
            self.outdent(2 + (not self.has_known_extends))
        # at this point we now have the blocks collected and can visit them too.
        for name, block in self.blocks.iteritems():
            block_frame = Frame(eval_ctx)
            block_frame.inspect(block.body)
            block_frame.block = name
            self.writeline('def block_%s(context%s):' % (name, envenv),
                           block, 1)
            self.indent()
            undeclared = find_undeclared(block.body, ('self', 'super'))
            if 'self' in undeclared:
                block_frame.identifiers.add_special('self')
                self.writeline('l_self = TemplateReference(context)')
            if 'super' in undeclared:
                block_frame.identifiers.add_special('super')
                self.writeline('l_super = context.super(%r, '
                               'block_%s)' % (name, name))
            self.pull_locals(block_frame)
            self.pull_dependencies(block.body)
            self.blockvisit(block.body, block_frame)
            self.outdent()
        self.writeline('blocks = {%s}' % ', '.join('%r: block_%s' % (x, x)
                                                   for x in self.blocks),
                       extra=1)
        # add a function that returns the debug info
        self.writeline('debug_info = %r' % '&'.join('%s=%s' % x for x
                                                    in self.debug_info))
    def visit_Block(self, node, frame):
        """Call a block and register it for the template."""
        level = 1
        if frame.toplevel:
            # if we know that we are a child template, there is no need to
            # check if we are one
            if self.has_known_extends:
                return
            if self.extends_so_far > 0:
                self.writeline('if parent_template is None:')
                self.indent()
                level += 1
        # scoped blocks get a derived context so assignments made so far
        # are visible inside the block
        context = node.scoped and 'context.derived(locals())' or 'context'
        self.writeline('for event in context.blocks[%r][0](%s):' % (
                       node.name, context), node)
        self.indent()
        self.simple_write('event', frame)
        self.outdent(level)
    def visit_Extends(self, node, frame):
        """Calls the extender: registers the parent template's blocks and
        remembers that this template is a child template.
        """
        if not frame.toplevel:
            self.fail('cannot use extend from a non top-level scope',
                      node.lineno)
        # if the number of extends statements in general is zero so
        # far, we don't have to add a check if something extended
        # the template before this one.
        if self.extends_so_far > 0:
            # if we have a known extends we just add a template runtime
            # error into the generated code. We could catch that at compile
            # time too, but i welcome it not to confuse users by throwing the
            # same error at different times just "because we can".
            if not self.has_known_extends:
                self.writeline('if parent_template is not None:')
                self.indent()
                self.writeline('raise TemplateRuntimeError(%r)' %
                               'extended multiple times')
                self.outdent()
            # if we have a known extends already we don't need that code here
            # as we know that the template execution will end here.
            if self.has_known_extends:
                raise CompilerExit()
        self.writeline('parent_template = environment.get_template(', node)
        self.visit(node.template, frame)
        self.write(', %r)' % self.name)
        self.writeline('for name, parent_block in parent_template.'
                       'blocks.%s():' % dict_item_iter)
        self.indent()
        self.writeline('context.blocks.setdefault(name, []).'
                       'append(parent_block)')
        self.outdent()
        # if this extends statement was in the root level we can take
        # advantage of that information and simplify the generated code
        # in the top level from this point onwards
        if frame.rootlevel:
            self.has_known_extends = True
        # and now we have one more
        self.extends_so_far += 1
    def visit_Include(self, node, frame):
        """Handles includes, optionally tolerating a missing template
        (``ignore_missing``) and optionally passing the current context
        (``with_context``).
        """
        if node.with_context:
            self.unoptimize_scope(frame)
        if node.ignore_missing:
            self.writeline('try:')
            self.indent()
        # pick the narrowest lookup function the template expression allows
        func_name = 'get_or_select_template'
        if isinstance(node.template, nodes.Const):
            if isinstance(node.template.value, basestring):
                func_name = 'get_template'
            elif isinstance(node.template.value, (tuple, list)):
                func_name = 'select_template'
        elif isinstance(node.template, (nodes.Tuple, nodes.List)):
            func_name = 'select_template'
        self.writeline('template = environment.%s(' % func_name, node)
        self.visit(node.template, frame)
        self.write(', %r)' % self.name)
        if node.ignore_missing:
            self.outdent()
            self.writeline('except TemplateNotFound:')
            self.indent()
            self.writeline('pass')
            self.outdent()
            self.writeline('else:')
            self.indent()
        if node.with_context:
            self.writeline('for event in template.root_render_func('
                           'template.new_context(context.parent, True, '
                           'locals())):')
        else:
            self.writeline('for event in template.module._body_stream:')
        self.indent()
        self.simple_write('event', frame)
        self.outdent()
        if node.ignore_missing:
            self.outdent()
    def visit_Import(self, node, frame):
        """Visit regular imports (``{% import ... as x %}``)."""
        if node.with_context:
            self.unoptimize_scope(frame)
        self.writeline('l_%s = ' % node.target, node)
        if frame.toplevel:
            # toplevel imports are also stored on the context
            self.write('context.vars[%r] = ' % node.target)
        self.write('environment.get_template(')
        self.visit(node.template, frame)
        self.write(', %r).' % self.name)
        if node.with_context:
            self.write('make_module(context.parent, True, locals())')
        else:
            self.write('module')
        if frame.toplevel and not node.target.startswith('_'):
            # imported modules are never exported from the template
            self.writeline('context.exported_vars.discard(%r)' % node.target)
        frame.assigned_names.add(node.target)
    def visit_FromImport(self, node, frame):
        """Visit named imports (``{% from ... import a, b as c %}``)."""
        self.newline(node)
        self.write('included_template = environment.get_template(')
        self.visit(node.template, frame)
        self.write(', %r).' % self.name)
        if node.with_context:
            self.write('make_module(context.parent, True)')
        else:
            self.write('module')
        var_names = []
        discarded_names = []
        for name in node.names:
            if isinstance(name, tuple):
                name, alias = name
            else:
                alias = name
            self.writeline('l_%s = getattr(included_template, '
                           '%r, missing)' % (alias, name))
            # a missing attribute becomes an undefined object with a
            # descriptive message instead of failing at import time
            self.writeline('if l_%s is missing:' % alias)
            self.indent()
            self.writeline('l_%s = environment.undefined(%r %% '
                           'included_template.__name__, '
                           'name=%r)' %
                           (alias, 'the template %%r (imported on %s) does '
                           'not export the requested name %s' % (
                                self.position(node),
                                repr(name)
                           ), name))
            self.outdent()
            if frame.toplevel:
                var_names.append(alias)
                if not alias.startswith('_'):
                    discarded_names.append(alias)
            frame.assigned_names.add(alias)
        if var_names:
            if len(var_names) == 1:
                name = var_names[0]
                self.writeline('context.vars[%r] = l_%s' % (name, name))
            else:
                self.writeline('context.vars.update({%s})' % ', '.join(
                    '%r: l_%s' % (name, name) for name in var_names
                ))
        if discarded_names:
            if len(discarded_names) == 1:
                self.writeline('context.exported_vars.discard(%r)' %
                               discarded_names[0])
            else:
                self.writeline('context.exported_vars.difference_'
                               'update((%s))' % ', '.join(map(repr, discarded_names)))
    def visit_For(self, node, frame):
        """Compile a ``{% for %}`` loop, handling the optional filter
        expression, ``else`` block, the special ``loop`` variable and
        recursive mode.
        """
        # when calculating the nodes for the inner frame we have to exclude
        # the iterator contents from it
        children = node.iter_child_nodes(exclude=('iter',))
        if node.recursive:
            loop_frame = self.function_scoping(node, frame, children,
                                               find_special=False)
        else:
            loop_frame = frame.inner()
            loop_frame.inspect(children)
        # try to figure out if we have an extended loop. An extended loop
        # is necessary if the loop is in recursive mode if the special loop
        # variable is accessed in the body.
        extended_loop = node.recursive or 'loop' in \
                        find_undeclared(node.iter_child_nodes(
                            only=('body',)), ('loop',))
        # if we don't have an recursive loop we have to find the shadowed
        # variables at that point. Because loops can be nested but the loop
        # variable is a special one we have to enforce aliasing for it.
        if not node.recursive:
            aliases = self.push_scope(loop_frame, ('loop',))
        # otherwise we set up a buffer and add a function def
        else:
            self.writeline('def loop(reciter, loop_render_func):', node)
            self.indent()
            self.buffer(loop_frame)
            aliases = {}
        # make sure the loop variable is a special one and raise a template
        # assertion error if a loop tries to write to loop
        if extended_loop:
            loop_frame.identifiers.add_special('loop')
        for name in node.find_all(nodes.Name):
            if name.ctx == 'store' and name.name == 'loop':
                self.fail('Can\'t assign to special loop variable '
                          'in for-loop target', name.lineno)
        self.pull_locals(loop_frame)
        if node.else_:
            # the indicator stays 1 only if the loop body never ran
            iteration_indicator = self.temporary_identifier()
            self.writeline('%s = 1' % iteration_indicator)
        # Create a fake parent loop if the else or test section of a
        # loop is accessing the special loop variable and no parent loop
        # exists.
        if 'loop' not in aliases and 'loop' in find_undeclared(
           node.iter_child_nodes(only=('else_', 'test')), ('loop',)):
            self.writeline("l_loop = environment.undefined(%r, name='loop')" %
                ("'loop' is undefined. the filter section of a loop as well "
                 "as the else block don't have access to the special 'loop'"
                 " variable of the current loop. Because there is no parent "
                 "loop it's undefined. Happened in loop on %s" %
                 self.position(node)))
        self.writeline('for ', node)
        self.visit(node.target, loop_frame)
        self.write(extended_loop and ', l_loop in LoopContext(' or ' in ')
        # if we have an extened loop and a node test, we filter in the
        # "outer frame".
        if extended_loop and node.test is not None:
            # the filter becomes a generator expression wrapping the
            # iterable so LoopContext sees only the passing items
            self.write('(')
            self.visit(node.target, loop_frame)
            self.write(' for ')
            self.visit(node.target, loop_frame)
            self.write(' in ')
            if node.recursive:
                self.write('reciter')
            else:
                self.visit(node.iter, loop_frame)
            self.write(' if (')
            test_frame = loop_frame.copy()
            self.visit(node.test, test_frame)
            self.write('))')
        elif node.recursive:
            self.write('reciter')
        else:
            self.visit(node.iter, loop_frame)
        if node.recursive:
            self.write(', recurse=loop_render_func):')
        else:
            self.write(extended_loop and '):' or ':')
        # tests in not extended loops become a continue
        if not extended_loop and node.test is not None:
            self.indent()
            self.writeline('if not ')
            self.visit(node.test, loop_frame)
            self.write(':')
            self.indent()
            self.writeline('continue')
            self.outdent(2)
        self.indent()
        self.blockvisit(node.body, loop_frame)
        if node.else_:
            self.writeline('%s = 0' % iteration_indicator)
        self.outdent()
        if node.else_:
            self.writeline('if %s:' % iteration_indicator)
            self.indent()
            self.blockvisit(node.else_, loop_frame)
            self.outdent()
        # reset the aliases if there are any.
        if not node.recursive:
            self.pop_scope(aliases, loop_frame)
        # if the node was recursive we have to return the buffer contents
        # and start the iteration code
        if node.recursive:
            self.return_buffer_contents(loop_frame)
            self.outdent()
            self.start_write(frame, node)
            self.write('loop(')
            self.visit(node.iter, frame)
            self.write(', loop)')
            self.end_write(frame)
    def visit_If(self, node, frame):
        """Compile an ``{% if %}`` statement; both branches share a soft
        frame derived from the current one.
        """
        if_frame = frame.soft()
        self.writeline('if ', node)
        self.visit(node.test, if_frame)
        self.write(':')
        self.indent()
        self.blockvisit(node.body, if_frame)
        self.outdent()
        if node.else_:
            self.writeline('else:')
            self.indent()
            self.blockvisit(node.else_, if_frame)
            self.outdent()
    def visit_Macro(self, node, frame):
        """Compile a ``{% macro %}``: emit the function body and assign
        the resulting `Macro` object to ``l_<name>`` (and to the context
        for toplevel macros).
        """
        macro_frame = self.macro_body(node, frame)
        self.newline()
        if frame.toplevel:
            # underscore-prefixed macros stay private to the template
            if not node.name.startswith('_'):
                self.write('context.exported_vars.add(%r)' % node.name)
            self.writeline('context.vars[%r] = ' % node.name)
        self.write('l_%s = ' % node.name)
        self.macro_def(node, macro_frame)
        frame.assigned_names.add(node.name)
    def visit_CallBlock(self, node, frame):
        """Compile a ``{% call %}`` block: the body becomes an anonymous
        macro bound to ``caller`` which is forwarded to the call.
        """
        children = node.iter_child_nodes(exclude=('call',))
        call_frame = self.macro_body(node, frame, children)
        self.writeline('caller = ')
        self.macro_def(node, call_frame)
        self.start_write(frame, node)
        self.visit_Call(node.call, call_frame, forward_caller=True)
        self.end_write(frame)
    def visit_FilterBlock(self, node, frame):
        """Compile a ``{% filter %}`` block: the body is buffered and the
        buffer is passed through the filter expression.
        """
        filter_frame = frame.inner()
        filter_frame.inspect(node.iter_child_nodes())
        aliases = self.push_scope(filter_frame)
        self.pull_locals(filter_frame)
        self.buffer(filter_frame)
        self.blockvisit(node.body, filter_frame)
        self.start_write(frame, node)
        self.visit_Filter(node.filter, filter_frame)
        self.end_write(frame)
        self.pop_scope(aliases, filter_frame)
    def visit_ExprStmt(self, node, frame):
        """Compile a ``{% do %}`` statement: evaluate the expression for
        its side effects, discarding the result.
        """
        self.newline(node)
        self.visit(node.node, frame)
    def visit_Output(self, node, frame):
        """Compile an output node (literal template data plus
        interpolated expressions) into ``yield`` statements or buffer
        appends.

        Constant subexpressions are folded into static strings at
        compile time; the remaining dynamic chunks are emitted either
        as individual writes or as a single %-format string, whichever
        is cheaper for the number of chunks involved.
        """
        # if we have a known extends statement, we don't output anything
        # if we are in a require_output_check section
        if self.has_known_extends and frame.require_output_check:
            return
        if self.environment.finalize:
            finalize = lambda x: unicode(self.environment.finalize(x))
        else:
            finalize = unicode
        # if we are inside a frame that requires output checking, we do so
        outdent_later = False
        if frame.require_output_check:
            self.writeline('if parent_template is None:')
            self.indent()
            outdent_later = True
        # try to evaluate as many chunks as possible into a static
        # string at compile time.
        body = []
        for child in node.nodes:
            try:
                const = child.as_const(frame.eval_ctx)
            except nodes.Impossible:
                body.append(child)
                continue
            # the frame can't be volatile here, becaus otherwise the
            # as_const() function would raise an Impossible exception
            # at that point.
            try:
                if frame.eval_ctx.autoescape:
                    if hasattr(const, '__html__'):
                        const = const.__html__()
                    else:
                        const = escape(const)
                const = finalize(const)
            except Exception:
                # if something goes wrong here we evaluate the node
                # at runtime for easier debugging
                body.append(child)
                continue
            # runs of constants are collected into one list so they can
            # be concatenated into a single static string below
            if body and isinstance(body[-1], list):
                body[-1].append(const)
            else:
                body.append([const])
        # if we have less than 3 nodes or a buffer we yield or extend/append
        if len(body) < 3 or frame.buffer is not None:
            if frame.buffer is not None:
                # for one item we append, for more we extend
                if len(body) == 1:
                    self.writeline('%s.append(' % frame.buffer)
                else:
                    self.writeline('%s.extend((' % frame.buffer)
                self.indent()
            for item in body:
                if isinstance(item, list):
                    # static chunk: emit the pre-concatenated constant
                    val = repr(concat(item))
                    if frame.buffer is None:
                        self.writeline('yield ' + val)
                    else:
                        self.writeline(val + ', ')
                else:
                    # dynamic chunk: wrap in escape()/to_string() and,
                    # if configured, environment.finalize()
                    if frame.buffer is None:
                        self.writeline('yield ', item)
                    else:
                        self.newline(item)
                    close = 1
                    if frame.eval_ctx.volatile:
                        self.write('(context.eval_ctx.autoescape and'
                                   ' escape or to_string)(')
                    elif frame.eval_ctx.autoescape:
                        self.write('escape(')
                    else:
                        self.write('to_string(')
                    if self.environment.finalize is not None:
                        self.write('environment.finalize(')
                        close += 1
                    self.visit(item, frame)
                    self.write(')' * close)
                    if frame.buffer is not None:
                        self.write(', ')
            if frame.buffer is not None:
                # close the open parentheses
                self.outdent()
                self.writeline(len(body) == 1 and ')' or '))')
        # otherwise we create a format string as this is faster in that case
        else:
            format = []
            arguments = []
            for item in body:
                if isinstance(item, list):
                    # static text becomes literal format-string content
                    format.append(concat(item).replace('%', '%%'))
                else:
                    format.append('%s')
                    arguments.append(item)
            self.writeline('yield ')
            self.write(repr(concat(format)) + ' % (')
            idx = -1
            # NOTE(review): idx above appears unused here — looks like a
            # historical leftover; confirm before removing.
            self.indent()
            for argument in arguments:
                self.newline(argument)
                close = 0
                if frame.eval_ctx.volatile:
                    self.write('(context.eval_ctx.autoescape and'
                               ' escape or to_string)(')
                    close += 1
                elif frame.eval_ctx.autoescape:
                    self.write('escape(')
                    close += 1
                if self.environment.finalize is not None:
                    self.write('environment.finalize(')
                    close += 1
                self.visit(argument, frame)
                self.write(')' * close + ', ')
            self.outdent()
            self.writeline(')')
        if outdent_later:
            self.outdent()
    def visit_Assign(self, node, frame):
        """Compile a ``{% set %}`` assignment.

        Top-level assignments are additionally written back into
        ``context.vars`` and, for names not starting with an
        underscore, recorded in ``context.exported_vars``.
        """
        self.newline(node)
        # toplevel assignments however go into the local namespace and
        # the current template's context. We create a copy of the frame
        # here and add a set so that the Name visitor can add the assigned
        # names here.
        if frame.toplevel:
            assignment_frame = frame.copy()
            assignment_frame.toplevel_assignments = set()
        else:
            assignment_frame = frame
        self.visit(node.target, assignment_frame)
        self.write(' = ')
        self.visit(node.node, frame)
        # make sure toplevel assignments are added to the context.
        if frame.toplevel:
            public_names = [x for x in assignment_frame.toplevel_assignments
                            if not x.startswith('_')]
            if len(assignment_frame.toplevel_assignments) == 1:
                name = next(iter(assignment_frame.toplevel_assignments))
                self.writeline('context.vars[%r] = l_%s' % (name, name))
            else:
                self.writeline('context.vars.update({')
                for idx, name in enumerate(assignment_frame.toplevel_assignments):
                    if idx:
                        self.write(', ')
                    self.write('%r: l_%s' % (name, name))
                self.write('})')
            if public_names:
                if len(public_names) == 1:
                    self.writeline('context.exported_vars.add(%r)' %
                                   public_names[0])
                else:
                    self.writeline('context.exported_vars.update((%s))' %
                                   ', '.join(map(repr, public_names)))
# -- Expression Visitors
def visit_Name(self, node, frame):
if node.ctx == 'store' and frame.toplevel:
frame.toplevel_assignments.add(node.name)
self.write('l_' + node.name)
frame.assigned_names.add(node.name)
def visit_Const(self, node, frame):
val = node.value
if isinstance(val, float):
self.write(str(val))
else:
self.write(repr(val))
    def visit_TemplateData(self, node, frame):
        """Emit raw template data; when the eval context is volatile
        the autoescape decision is deferred to runtime."""
        try:
            self.write(repr(node.as_const(frame.eval_ctx)))
        except nodes.Impossible:
            self.write('(context.eval_ctx.autoescape and Markup or identity)(%r)'
                       % node.data)
def visit_Tuple(self, node, frame):
self.write('(')
idx = -1
for idx, item in enumerate(node.items):
if idx:
self.write(', ')
self.visit(item, frame)
self.write(idx == 0 and ',)' or ')')
def visit_List(self, node, frame):
self.write('[')
for idx, item in enumerate(node.items):
if idx:
self.write(', ')
self.visit(item, frame)
self.write(']')
def visit_Dict(self, node, frame):
self.write('{')
for idx, item in enumerate(node.items):
if idx:
self.write(', ')
self.visit(item.key, frame)
self.write(': ')
self.visit(item.value, frame)
self.write('}')
    # Factories that build the visitor methods for binary and unary
    # operators.  They run once at class-creation time and are deleted
    # again below; the generated visitors are plain methods.
    def binop(operator, interceptable=True):
        # NOTE(review): the `interceptable` flag is accepted but not
        # consulted in the body — confirm against upstream before use.
        def visitor(self, node, frame):
            # sandboxed environments may route the operator through
            # environment.call_binop so it can be intercepted
            if self.environment.sandboxed and \
               operator in self.environment.intercepted_binops:
                self.write('environment.call_binop(context, %r, ' % operator)
                self.visit(node.left, frame)
                self.write(', ')
                self.visit(node.right, frame)
            else:
                self.write('(')
                self.visit(node.left, frame)
                self.write(' %s ' % operator)
                self.visit(node.right, frame)
            self.write(')')
        return visitor
    def uaop(operator, interceptable=True):
        # Same as binop but for unary operators.
        def visitor(self, node, frame):
            if self.environment.sandboxed and \
               operator in self.environment.intercepted_unops:
                self.write('environment.call_unop(context, %r, ' % operator)
                self.visit(node.node, frame)
            else:
                self.write('(' + operator)
                self.visit(node.node, frame)
            self.write(')')
        return visitor
    visit_Add = binop('+')
    visit_Sub = binop('-')
    visit_Mul = binop('*')
    visit_Div = binop('/')
    visit_FloorDiv = binop('//')
    visit_Pow = binop('**')
    visit_Mod = binop('%')
    visit_And = binop('and', interceptable=False)
    visit_Or = binop('or', interceptable=False)
    visit_Pos = uaop('+')
    visit_Neg = uaop('-')
    visit_Not = uaop('not ', interceptable=False)
    # the factories are only needed at class-creation time
    del binop, uaop
def visit_Concat(self, node, frame):
if frame.eval_ctx.volatile:
func_name = '(context.eval_ctx.volatile and' \
' markup_join or unicode_join)'
elif frame.eval_ctx.autoescape:
func_name = 'markup_join'
else:
func_name = 'unicode_join'
self.write('%s((' % func_name)
for arg in node.nodes:
self.visit(arg, frame)
self.write(', ')
self.write('))')
def visit_Compare(self, node, frame):
self.visit(node.expr, frame)
for op in node.ops:
self.visit(op, frame)
def visit_Operand(self, node, frame):
self.write(' %s ' % operators[node.op])
self.visit(node.expr, frame)
    def visit_Getattr(self, node, frame):
        """Compile attribute access through environment.getattr so the
        environment controls lookup (sandboxing, undefined handling)."""
        self.write('environment.getattr(')
        self.visit(node.node, frame)
        self.write(', %r)' % node.attr)
def visit_Getitem(self, node, frame):
# slices bypass the environment getitem method.
if isinstance(node.arg, nodes.Slice):
self.visit(node.node, frame)
self.write('[')
self.visit(node.arg, frame)
self.write(']')
else:
self.write('environment.getitem(')
self.visit(node.node, frame)
self.write(', ')
self.visit(node.arg, frame)
self.write(')')
    def visit_Slice(self, node, frame):
        """Emit the ``start:stop:step`` parts of a slice; omitted
        parts stay empty, and ``:step`` only appears when given."""
        if node.start is not None:
            self.visit(node.start, frame)
        self.write(':')
        if node.stop is not None:
            self.visit(node.stop, frame)
        if node.step is not None:
            self.write(':')
            self.visit(node.step, frame)
    def visit_Filter(self, node, frame):
        """Compile a filter application.

        Filters marked contextfilter / evalcontextfilter /
        environmentfilter receive the corresponding extra leading
        argument.  When ``node.node`` is None this call sits inside a
        filter block and the filter consumes the frame's buffer.
        """
        self.write(self.filters[node.name] + '(')
        func = self.environment.filters.get(node.name)
        if func is None:
            self.fail('no filter named %r' % node.name, node.lineno)
        if getattr(func, 'contextfilter', False):
            self.write('context, ')
        elif getattr(func, 'evalcontextfilter', False):
            self.write('context.eval_ctx, ')
        elif getattr(func, 'environmentfilter', False):
            self.write('environment, ')
        # if the filter node is None we are inside a filter block
        # and want to write to the current buffer
        if node.node is not None:
            self.visit(node.node, frame)
        elif frame.eval_ctx.volatile:
            # autoescape decided at runtime: maybe wrap the buffer in Markup
            self.write('(context.eval_ctx.autoescape and'
                       ' Markup(concat(%s)) or concat(%s))' %
                       (frame.buffer, frame.buffer))
        elif frame.eval_ctx.autoescape:
            self.write('Markup(concat(%s))' % frame.buffer)
        else:
            self.write('concat(%s)' % frame.buffer)
        self.signature(node, frame)
        self.write(')')
    def visit_Test(self, node, frame):
        """Compile a test application (``expr is testname``).  The
        dependency alias is written first; unknown tests fail with a
        compile-time error."""
        self.write(self.tests[node.name] + '(')
        if node.name not in self.environment.tests:
            self.fail('no test named %r' % node.name, node.lineno)
        self.visit(node.node, frame)
        self.signature(node, frame)
        self.write(')')
    def visit_CondExpr(self, node, frame):
        """Compile an inline ``a if cond else b`` expression.

        Without an else part, evaluating the false branch yields an
        undefined object with an explanatory message.  When the host
        Python lacks conditional expressions (``have_condexpr`` false)
        an equivalent and/or construct is emitted instead.
        """
        def write_expr2():
            if node.expr2 is not None:
                return self.visit(node.expr2, frame)
            self.write('environment.undefined(%r)' % ('the inline if-'
                       'expression on %s evaluated to false and '
                       'no else section was defined.' % self.position(node)))
        if not have_condexpr:
            self.write('((')
            self.visit(node.test, frame)
            self.write(') and (')
            self.visit(node.expr1, frame)
            self.write(',) or (')
            write_expr2()
            self.write(',))[0]')
        else:
            self.write('(')
            self.visit(node.expr1, frame)
            self.write(' if ')
            self.visit(node.test, frame)
            self.write(' else ')
            write_expr2()
            self.write(')')
def visit_Call(self, node, frame, forward_caller=False):
if self.environment.sandboxed:
self.write('environment.call(context, ')
else:
self.write('context.call(')
self.visit(node.node, frame)
extra_kwargs = forward_caller and {'caller': 'caller'} or None
self.signature(node, frame, extra_kwargs)
self.write(')')
    def visit_Keyword(self, node, frame):
        """Emit a ``name=value`` keyword argument."""
        self.write(node.key + '=')
        self.visit(node.value, frame)
# -- Unused nodes for extensions
    def visit_MarkSafe(self, node, frame):
        """Wrap the expression in Markup(), marking it HTML-safe."""
        self.write('Markup(')
        self.visit(node.expr, frame)
        self.write(')')
    def visit_MarkSafeIfAutoescape(self, node, frame):
        """Wrap in Markup() only when autoescaping is on at runtime;
        otherwise pass the value through unchanged."""
        self.write('(context.eval_ctx.autoescape and Markup or identity)(')
        self.visit(node.expr, frame)
        self.write(')')
def visit_EnvironmentAttribute(self, node, frame):
self.write('environment.' + node.name)
    def visit_ExtensionAttribute(self, node, frame):
        """Emit an attribute lookup on a loaded extension instance."""
        self.write('environment.extensions[%r].%s' % (node.identifier, node.name))
    def visit_ImportedName(self, node, frame):
        """Emit the compile-time alias of an imported helper name."""
        self.write(self.import_aliases[node.importname])
    def visit_InternalName(self, node, frame):
        """Emit an internal (compiler-generated) identifier verbatim."""
        self.write(node.name)
    def visit_ContextReference(self, node, frame):
        """Emit a reference to the current template context object."""
        self.write('context')
    def visit_Continue(self, node, frame):
        """Emit a ``continue`` statement (loop controls extension)."""
        self.writeline('continue', node)
    def visit_Break(self, node, frame):
        """Emit a ``break`` statement (loop controls extension)."""
        self.writeline('break', node)
    def visit_Scope(self, node, frame):
        """Compile an artificial scope node: the body runs in an inner
        frame whose names are pushed and popped around it."""
        scope_frame = frame.inner()
        scope_frame.inspect(node.iter_child_nodes())
        aliases = self.push_scope(scope_frame)
        self.pull_locals(scope_frame)
        self.blockvisit(node.body, scope_frame)
        self.pop_scope(aliases, scope_frame)
    def visit_EvalContextModifier(self, node, frame):
        """Compile eval-context changes (e.g. ``{% autoescape %}``).

        The compile-time eval context is mirrored so later constant
        folding sees the new setting; a non-constant value makes the
        frame's eval context volatile instead.
        """
        for keyword in node.options:
            self.writeline('context.eval_ctx.%s = ' % keyword.key)
            self.visit(keyword.value, frame)
            try:
                val = keyword.value.as_const(frame.eval_ctx)
            except nodes.Impossible:
                frame.eval_ctx.volatile = True
            else:
                setattr(frame.eval_ctx, keyword.key, val)
    def visit_ScopedEvalContextModifier(self, node, frame):
        """Like visit_EvalContextModifier but scoped: the runtime eval
        context is saved before the body and reverted after it, and the
        compile-time mirror is reverted in lockstep."""
        old_ctx_name = self.temporary_identifier()
        safed_ctx = frame.eval_ctx.save()
        self.writeline('%s = context.eval_ctx.save()' % old_ctx_name)
        self.visit_EvalContextModifier(node, frame)
        for child in node.body:
            self.visit(child, frame)
        frame.eval_ctx.revert(safed_ctx)
        self.writeline('context.eval_ctx.revert(%s)' % old_ctx_name)
| apache-2.0 |
eenchev/idea-note-taking-app | env/lib/python2.7/site-packages/psycopg2/tests/test_async_keyword.py | 7 | 7351 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# test_async_keyword.py - test for objects using 'async' as attribute/param
#
# Copyright (C) 2017 Daniele Varrazzo <daniele.varrazzo@gmail.com>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import time
import psycopg2
from psycopg2 import extras
from testconfig import dsn
from testutils import ConnectingTestCase, unittest, skip_before_postgres, slow
from test_replication import ReplicationTestCase, skip_repl_if_green
from psycopg2.extras import LogicalReplicationConnection, StopReplication
class AsyncTests(ConnectingTestCase):
    # NOTE: this whole module deliberately uses the pre-Python-3.7
    # spelling `async` as an attribute/keyword argument (see the module
    # header); it is Python 2 only and must not be "modernised".

    def setUp(self):
        ConnectingTestCase.setUp(self)

        # keep the synchronous connection and open an async one alongside
        self.sync_conn = self.conn
        self.conn = self.connect(async=True)

        self.wait(self.conn)

        curs = self.conn.cursor()
        curs.execute('''
            CREATE TEMPORARY TABLE table1 (
              id int PRIMARY KEY
            )''')
        self.wait(curs)

    def test_connection_setup(self):
        # verify the async flag and basic properties of both connections
        cur = self.conn.cursor()
        sync_cur = self.sync_conn.cursor()
        del cur, sync_cur

        self.assert_(self.conn.async)
        self.assert_(not self.sync_conn.async)

        # the async connection should be autocommit
        self.assert_(self.conn.autocommit)

        # check other properties to be found on the connection
        self.assert_(self.conn.server_version)
        self.assert_(self.conn.protocol_version in (2, 3))
        self.assert_(self.conn.encoding in psycopg2.extensions.encodings)

    def test_async_subclass(self):
        # a connection subclass may accept and forward the async argument
        class MyConn(psycopg2.extensions.connection):
            def __init__(self, dsn, async=0):
                psycopg2.extensions.connection.__init__(self, dsn, async=async)

        conn = self.connect(connection_factory=MyConn, async=True)
        self.assert_(isinstance(conn, MyConn))
        self.assert_(conn.async)
        conn.close()

    def test_async_connection_error_message(self):
        # the original connection failure reason must not be replaced by
        # a generic "asynchronous connection failed" message
        try:
            cnn = psycopg2.connect('dbname=thisdatabasedoesntexist', async=True)
            self.wait(cnn)
        except psycopg2.Error, e:
            self.assertNotEqual(str(e), "asynchronous connection failed",
                                "connection error reason lost")
        else:
            self.fail("no exception raised")
class CancelTests(ConnectingTestCase):
    # cancellation behaviour of connections opened with async=True
    # (legacy pre-3.7 keyword spelling; Python 2 only module)

    def setUp(self):
        ConnectingTestCase.setUp(self)

        cur = self.conn.cursor()
        cur.execute('''
            CREATE TEMPORARY TABLE table1 (
              id int PRIMARY KEY
            )''')
        self.conn.commit()

    @slow
    @skip_before_postgres(8, 2)
    def test_async_cancel(self):
        # cancel() on a fresh async connection is an error ...
        async_conn = psycopg2.connect(dsn, async=True)
        self.assertRaises(psycopg2.OperationalError, async_conn.cancel)
        extras.wait_select(async_conn)
        cur = async_conn.cursor()
        cur.execute("select pg_sleep(10)")
        time.sleep(1)
        self.assertTrue(async_conn.isexecuting())
        # ... but cancelling a long-running query raises
        # QueryCanceledError and leaves the connection usable
        async_conn.cancel()
        self.assertRaises(psycopg2.extensions.QueryCanceledError,
                          extras.wait_select, async_conn)
        cur.execute("select 1")
        extras.wait_select(async_conn)
        self.assertEqual(cur.fetchall(), [(1, )])

    def test_async_connection_cancel(self):
        # closing an async connection must mark it closed
        async_conn = psycopg2.connect(dsn, async=True)
        async_conn.close()
        self.assertTrue(async_conn.closed)
class ConnectTestCase(unittest.TestCase):
    # verifies that psycopg2.connect() forwards dsn, factory and the
    # (legacy) async flag to the internal _connect; _connect is stubbed
    # so no real connection is attempted

    def setUp(self):
        self.args = None

        def connect_stub(dsn, connection_factory=None, async=False):
            # record the forwarded arguments for inspection
            self.args = (dsn, connection_factory, async)

        self._connect_orig = psycopg2._connect
        psycopg2._connect = connect_stub

    def tearDown(self):
        # restore the real _connect
        psycopg2._connect = self._connect_orig

    def test_there_has_to_be_something(self):
        # connect() without a dsn or keyword parameters is an error
        self.assertRaises(TypeError, psycopg2.connect)
        self.assertRaises(TypeError, psycopg2.connect,
                          connection_factory=lambda dsn, async=False: None)
        self.assertRaises(TypeError, psycopg2.connect,
                          async=True)

    def test_factory(self):
        def f(dsn, async=False):
            pass

        psycopg2.connect(database='foo', host='baz', connection_factory=f)
        self.assertDsnEqual(self.args[0], 'dbname=foo host=baz')
        self.assertEqual(self.args[1], f)
        self.assertEqual(self.args[2], False)

        psycopg2.connect("dbname=foo host=baz", connection_factory=f)
        self.assertDsnEqual(self.args[0], 'dbname=foo host=baz')
        self.assertEqual(self.args[1], f)
        self.assertEqual(self.args[2], False)

    def test_async(self):
        # both truthy ints and True must be forwarded as the async flag
        psycopg2.connect(database='foo', host='baz', async=1)
        self.assertDsnEqual(self.args[0], 'dbname=foo host=baz')
        self.assertEqual(self.args[1], None)
        self.assert_(self.args[2])

        psycopg2.connect("dbname=foo host=baz", async=True)
        self.assertDsnEqual(self.args[0], 'dbname=foo host=baz')
        self.assertEqual(self.args[1], None)
        self.assert_(self.args[2])
class AsyncReplicationTest(ReplicationTestCase):
    # logical replication over a connection opened with the legacy
    # async=1 keyword spelling

    @skip_before_postgres(9, 4)  # slots require 9.4
    @skip_repl_if_green
    def test_async_replication(self):
        conn = self.repl_connect(
            connection_factory=LogicalReplicationConnection, async=1)
        if conn is None:
            return

        cur = conn.cursor()

        self.create_replication_slot(cur, output_plugin='test_decoding')
        self.wait(cur)

        cur.start_replication(self.slot)
        self.wait(cur)

        self.make_replication_events()

        self.msg_count = 0

        def consume(msg):
            # just check the methods
            "%s: %s" % (cur.io_timestamp, repr(msg))

            self.msg_count += 1
            if self.msg_count > 3:
                cur.send_feedback(reply=True)
                raise StopReplication()

            cur.send_feedback(flush_lsn=msg.data_start)

        # cannot be used in asynchronous mode
        self.assertRaises(psycopg2.ProgrammingError, cur.consume_stream, consume)

        def process_stream():
            # drive the replication stream manually with select()
            from select import select
            while True:
                msg = cur.read_message()
                if msg:
                    consume(msg)
                else:
                    select([cur], [], [])
        self.assertRaises(StopReplication, process_stream)
def test_suite():
    """Return a TestSuite with every test defined in this module."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
# allow running this test module directly from the command line
if __name__ == "__main__":
    unittest.main()
| mit |
deepakkv07/Implementation-of-UDP-Lite-in-ns-3 | src/core/examples/sample-simulator.py | 34 | 2387 | # -*- Mode:Python; -*-
# /*
# * Copyright (c) 2010 INRIA
# *
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation;
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# *
# * Authors: Mathieu Lacage <mathieu.lacage@sophia.inria.fr>
# */
#
# Python version of sample-simulator.cc
import ns.core
class MyModel(object):
    """Simple model object to illustrate event handling."""
    # NOTE: this example is Python 2 only (print statements below)

    ## \returns None.
    def Start(self):
        """Start model execution by scheduling a HandleEvent."""
        # fire HandleEvent 10 simulated seconds from now, passing the
        # current simulation time as its argument
        ns.core.Simulator.Schedule(ns.core.Seconds(10.0), self.HandleEvent, ns.core.Simulator.Now().GetSeconds())

    ## \param [in] value Event argument.
    ## \return None.
    def HandleEvent(self, value):
        """Simple event handler."""
        print "Member method received event at", ns.core.Simulator.Now().GetSeconds(), \
            "s started at", value, "s"
# Free-function event handler: reports the firing time, then starts the model.
def ExampleFunction(model):
    print "ExampleFunction received event at", ns.core.Simulator.Now().GetSeconds(), "s"
    model.Start()
# Handler scheduled at a random time; only reports when it fired.
def RandomFunction(model):
    print "RandomFunction received event at", ns.core.Simulator.Now().GetSeconds(), "s"
# This event is cancelled in main() before it can fire.
def CancelledEvent():
    print "I should never be called... "
def main(dummy_argv):
    """Schedule the example events (including one that is cancelled)
    and run the simulation."""
    ns.core.CommandLine().Parse(dummy_argv)

    model = MyModel()
    # uniform random delay in [10, 20) seconds for RandomFunction
    v = ns.core.UniformRandomVariable()
    v.SetAttribute("Min", ns.core.DoubleValue (10))
    v.SetAttribute("Max", ns.core.DoubleValue (20))

    ns.core.Simulator.Schedule(ns.core.Seconds(10.0), ExampleFunction, model)

    ns.core.Simulator.Schedule(ns.core.Seconds(v.GetValue()), RandomFunction, model)

    # schedule and immediately cancel: CancelledEvent never fires
    id = ns.core.Simulator.Schedule(ns.core.Seconds(30.0), CancelledEvent)
    ns.core.Simulator.Cancel(id)

    ns.core.Simulator.Run()

    ns.core.Simulator.Destroy()
# run the example when executed as a script
if __name__ == '__main__':
    import sys
    main(sys.argv)
| gpl-2.0 |
TurboTurtle/sos | sos/report/plugins/jars.py | 3 | 5168 | # Copyright (C) 2016 Red Hat, Inc., Michal Srb <michal@redhat.com>
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
import hashlib
import json
import os
import re
import zipfile
from functools import partial
from sos.report.plugins import Plugin, RedHatPlugin
class Jars(Plugin, RedHatPlugin):
    """Collect information about available Java archives.

    Walks a configurable set of directories, identifies JAR files and
    records their path, SHA-1 digest and (when present) Maven
    coordinates into a JSON report ('jars.json').
    """

    short_desc = 'Collect information about available Java archives'

    plugin_name = "jars"
    version = "1.0.0"
    profiles = ("java",)

    option_list = [
        ("append_locations", "colon-separated list of additional JAR paths",
         "fast", ""),
        ("all_known_locations", "scan all known paths", "slow", False)
    ]

    # There is no standard location for JAR files and scanning
    # the whole filesystem could be very slow. Therefore we only
    # scan directories in which JARs can be typically found.
    jar_locations = (
        "/usr/share/java",  # common location for JARs
        "/usr/lib/java"     # common location for JARs containing native code
    )

    # Following paths can be optionally scanned as well. Note the scan can
    # take *very* long time.
    extra_jar_locations = (
        "/opt",         # location for RHSCL and 3rd party software
        "/usr/local",   # used by sysadmins when installing SW locally
        "/var/lib"      # Java services commonly explode WARs there
    )

    def setup(self):
        """Scan the configured locations for JARs and emit jars.json."""
        results = {"jars": []}
        jar_paths = []

        locations = list(Jars.jar_locations)
        if self.get_option("all_known_locations"):
            locations += list(Jars.extra_jar_locations)

        # append also user-defined locations, if any
        user_locations = self.get_option("append_locations")
        if user_locations:
            locations += user_locations.split(":")

        # find all JARs in given locations
        for location in locations:
            for dirpath, _, filenames in os.walk(location):
                for filename in filenames:
                    path = os.path.join(dirpath, filename)
                    if Jars.is_jar(path):
                        jar_paths.append(path)

        # try to extract information about found JARs
        for jar_path in jar_paths:
            maven_id = Jars.get_maven_id(jar_path)
            jar_id = Jars.get_jar_id(jar_path)
            if maven_id or jar_id:
                record = {"path": jar_path,
                          "sha1": jar_id,
                          "maven_id": maven_id
                          }
                results["jars"].append(record)

        results_str = json.dumps(results, indent=4, separators=(",", ": "))
        self.add_string_as_file(results_str, "jars.json")

    @staticmethod
    def is_jar(path):
        """Check whether given file is a JAR file.

        JARs are ZIP files which usually include a manifest
        at the canonical location 'META-INF/MANIFEST.MF'.
        """
        if os.path.isfile(path) and zipfile.is_zipfile(path):
            try:
                with zipfile.ZipFile(path) as f:
                    if "META-INF/MANIFEST.MF" in f.namelist():
                        return True
            except (IOError, zipfile.BadZipfile):
                pass
        return False

    @staticmethod
    def get_maven_id(jar_path):
        """Extract Maven coordinates from a given JAR file, if possible.

        JARs built by Maven (the most popular Java build system) ship a
        'pom.properties' file below META-INF/maven/; the coordinates
        (groupId, artifactId, version) are read from there.  Returns a
        dict of the properties (possibly empty), or None when the file
        is absent, ambiguous or malformed.
        """
        props = {}
        try:
            with zipfile.ZipFile(jar_path) as f:
                r = re.compile("META-INF/maven/[^/]+/[^/]+/pom.properties$")
                result = [x for x in f.namelist() if r.match(x)]
                if len(result) != 1:
                    return None
                with f.open(result[0]) as props_f:
                    for line in props_f.readlines():
                        line = line.strip()
                        # skip blanks and comments; previously a blank
                        # line aborted the whole extraction
                        if not line or line.startswith(b"#"):
                            continue
                        # split on the first '=' only so values that
                        # themselves contain '=' still parse
                        key, sep, value = line.partition(b"=")
                        if not sep:
                            # malformed property line
                            return None
                        key = key.decode('utf8').strip()
                        value = value.decode('utf8').strip()
                        props[key] = value
        except (IOError, zipfile.BadZipfile):
            # the file vanished or got corrupted between discovery
            # (is_jar) and this read; report what we have
            pass
        return props

    @staticmethod
    def get_jar_id(jar_path):
        """Compute JAR id.

        Returns the sha1 hex digest of a given JAR file, or an empty
        string when the file cannot be read.
        """
        jar_id = ""
        try:
            with open(jar_path, mode="rb") as f:
                m = hashlib.sha1()
                for buf in iter(partial(f.read, 4096), b''):
                    m.update(buf)
            jar_id = m.hexdigest()
        except IOError:
            pass
        return jar_id
| gpl-2.0 |
hugobuddel/orange3 | Orange/tests/test_lazytable.py | 1 | 76434 | import os
import unittest
from itertools import chain, islice
from math import isnan
import random
from Orange import data
from Orange.data import filter, Variable
from Orange.data import Unknown
import numpy as np
from unittest.mock import Mock, MagicMock, patch
# GUI is necessary to get a LazyTable from an InfiniTable widget.
from PyQt4.QtGui import QApplication
from Orange.widgets.data.owinfinitable import OWInfiniTable
class TableTestCase(unittest.TestCase):
def setUp(self):
Variable._clear_all_caches()
#data.table.dataset_dirs.append("Orange/tests")
self.qApp = QApplication([])
self.widget_infinitable = OWInfiniTable()
# No automatic pulling
self.widget_infinitable.data.stop_pulling = True
self.data1 = self.widget_infinitable.data
def tearDown(self):
self.qApp.quit()
# def test_indexing_class(self):
# d = data.Table("test1")
# self.assertEqual([e.get_class() for e in d], ["t", "t", "f"])
# cind = len(d.domain) - 1
# self.assertEqual([e[cind] for e in d], ["t", "t", "f"])
# self.assertEqual([e["d"] for e in d], ["t", "t", "f"])
# cvar = d.domain.class_var
# self.assertEqual([e[cvar] for e in d], ["t", "t", "f"])
#
# def test_filename(self):
# dir = data.table.get_sample_datasets_dir()
# d = data.Table("iris")
# self.assertEqual(d.__file__, os.path.join(dir, "iris.tab"))
#
# d = data.Table("test2.tab")
# self.assertTrue(d.__file__.endswith("test2.tab")) # platform dependent
#
# def test_indexing(self):
# import warnings
#
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# d = data.Table("test2")
#
# # regular, discrete
# varc = d.domain["c"]
# self.assertEqual(d[0, 1], "0")
# self.assertEqual(d[0, varc], "0")
# self.assertEqual(d[0, "c"], "0")
# self.assertEqual(d[0][1], "0")
# self.assertEqual(d[0][varc], "0")
# self.assertEqual(d[0]["c"], "0")
# self.assertEqual(d[np.int_(0), np.int_(1)], "0")
# self.assertEqual(d[np.int_(0)][np.int_(1)], "0")
#
# # regular, continuous
# varb = d.domain["b"]
# self.assertEqual(d[0, 0], 0)
# self.assertEqual(d[0, varb], 0)
# self.assertEqual(d[0, "b"], 0)
# self.assertEqual(d[0][0], 0)
# self.assertEqual(d[0][varb], 0)
# self.assertEqual(d[0]["b"], 0)
# self.assertEqual(d[np.int_(0), np.int_(0)], 0)
# self.assertEqual(d[np.int_(0)][np.int_(0)], 0)
#
# # negative
# varb = d.domain["b"]
# self.assertEqual(d[-2, 0], 3.333)
# self.assertEqual(d[-2, varb], 3.333)
# self.assertEqual(d[-2, "b"], 3.333)
# self.assertEqual(d[-2][0], 3.333)
# self.assertEqual(d[-2][varb], 3.333)
# self.assertEqual(d[-2]["b"], 3.333)
# self.assertEqual(d[np.int_(-2), np.int_(0)], 3.333)
# self.assertEqual(d[np.int_(-2)][np.int_(0)], 3.333)
#
# # meta, discrete
# vara = d.domain["a"]
# metaa = d.domain.index("a")
# self.assertEqual(d[0, metaa], "A")
# self.assertEqual(d[0, vara], "A")
# self.assertEqual(d[0, "a"], "A")
# self.assertEqual(d[0][metaa], "A")
# self.assertEqual(d[0][vara], "A")
# self.assertEqual(d[0]["a"], "A")
# self.assertEqual(d[np.int_(0), np.int_(metaa)], "A")
# self.assertEqual(d[np.int_(0)][np.int_(metaa)], "A")
#
# # meta, string
# vare = d.domain["e"]
# metae = d.domain.index("e")
# self.assertEqual(d[0, metae], "i")
# self.assertEqual(d[0, vare], "i")
# self.assertEqual(d[0, "e"], "i")
# self.assertEqual(d[0][metae], "i")
# self.assertEqual(d[0][vare], "i")
# self.assertEqual(d[0]["e"], "i")
# self.assertEqual(d[np.int_(0), np.int_(metae)], "i")
# self.assertEqual(d[np.int_(0)][np.int_(metae)], "i")
#
# def test_indexing_example(self):
# import warnings
#
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# d = data.Table("test2")
# e = d[0]
#
# # regular, discrete
# varc = d.domain["c"]
# self.assertEqual(e[1], "0")
# self.assertEqual(e[varc], "0")
# self.assertEqual(e["c"], "0")
# self.assertEqual(e[np.int_(1)], "0")
#
# # regular, continuous
# varb = d.domain["b"]
# self.assertEqual(e[0], 0)
# self.assertEqual(e[varb], 0)
# self.assertEqual(e["b"], 0)
# self.assertEqual(e[np.int_(0)], 0)
#
# # meta, discrete
# vara = d.domain["a"]
# metaa = d.domain.index("a")
# self.assertEqual(e[metaa], "A")
# self.assertEqual(e[vara], "A")
# self.assertEqual(e["a"], "A")
# self.assertEqual(e[np.int_(metaa)], "A")
#
# # meta, string
# vare = d.domain["e"]
# metae = d.domain.index("e")
# self.assertEqual(e[metae], "i")
# self.assertEqual(e[vare], "i")
# self.assertEqual(e["e"], "i")
# self.assertEqual(e[np.int_(metae)], "i")
#
# def test_indexing_assign_value(self):
# import warnings
#
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# d = data.Table("test2")
#
# # meta
# vara = d.domain["a"]
# metaa = d.domain.index("a")
#
# self.assertEqual(d[0, "a"], "A")
# d[0, "a"] = "B"
# self.assertEqual(d[0, "a"], "B")
# d[0]["a"] = "A"
# self.assertEqual(d[0, "a"], "A")
#
# d[0, vara] = "B"
# self.assertEqual(d[0, "a"], "B")
# d[0][vara] = "A"
# self.assertEqual(d[0, "a"], "A")
#
# d[0, metaa] = "B"
# self.assertEqual(d[0, "a"], "B")
# d[0][metaa] = "A"
# self.assertEqual(d[0, "a"], "A")
#
# d[0, np.int_(metaa)] = "B"
# self.assertEqual(d[0, "a"], "B")
# d[0][np.int_(metaa)] = "A"
# self.assertEqual(d[0, "a"], "A")
#
# # regular
# varb = d.domain["b"]
#
# self.assertEqual(d[0, "b"], 0)
# d[0, "b"] = 42
# self.assertEqual(d[0, "b"], 42)
# d[0]["b"] = 0
# self.assertEqual(d[0, "b"], 0)
#
# d[0, varb] = 42
# self.assertEqual(d[0, "b"], 42)
# d[0][varb] = 0
# self.assertEqual(d[0, "b"], 0)
#
# d[0, 0] = 42
# self.assertEqual(d[0, "b"], 42)
# d[0][0] = 0
# self.assertEqual(d[0, "b"], 0)
#
# d[0, np.int_(0)] = 42
# self.assertEqual(d[0, "b"], 42)
# d[0][np.int_(0)] = 0
# self.assertEqual(d[0, "b"], 0)
#
# def test_indexing_del_example(self):
# import warnings
#
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# d = data.Table("test2")
# initlen = len(d)
#
# # remove first
# d[4, "e"] = "4ex"
# self.assertEqual(d[4, "e"], "4ex")
# del d[0]
# self.assertEqual(len(d), initlen - 1)
# self.assertEqual(d[3, "e"], "4ex")
#
# # remove middle
# del d[2]
# self.assertEqual(len(d), initlen - 2)
# self.assertEqual(d[2, "e"], "4ex")
#
# # remove middle
# del d[4]
# self.assertEqual(len(d), initlen - 3)
# self.assertEqual(d[2, "e"], "4ex")
#
# # remove last
# d[-1, "e"] = "was last"
# del d[-1]
# self.assertEqual(len(d), initlen - 4)
# self.assertEqual(d[2, "e"], "4ex")
# self.assertNotEqual(d[-1, "e"], "was last")
#
# # remove one before last
# d[-1, "e"] = "was last"
# del d[-2]
# self.assertEqual(len(d), initlen - 5)
# self.assertEqual(d[2, "e"], "4ex")
# self.assertEqual(d[-1, "e"], "was last")
#
# d[np.int_(2), "e"] = "2ex"
# del d[np.int_(2)]
# self.assertEqual(len(d), initlen - 6)
# self.assertNotEqual(d[2, "e"], "2ex")
#
# with self.assertRaises(IndexError):
# del d[100]
# self.assertEqual(len(d), initlen - 6)
#
# with self.assertRaises(IndexError):
# del d[-100]
# self.assertEqual(len(d), initlen - 6)
#
# def test_indexing_assign_example(self):
# def almost_equal_list(s, t):
# for e, f in zip(s, t):
# self.assertAlmostEqual(e, f)
#
# import warnings
#
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# d = data.Table("test2")
#
# vara = d.domain["a"]
# metaa = d.domain.index(vara)
#
# self.assertFalse(isnan(d[0, "a"]))
# d[0] = ["3.14", "1", "f"]
# almost_equal_list(d[0].values(), [3.14, "1", "f"])
# self.assertTrue(isnan(d[0, "a"]))
# d[0] = [3.15, 1, "t"]
# almost_equal_list(d[0].values(), [3.15, "0", "t"])
# d[np.int_(0)] = [3.15, 2, "f"]
# almost_equal_list(d[0].values(), [3.15, 2, "f"])
#
# with self.assertRaises(ValueError):
# d[0] = ["3.14", "1"]
#
# with self.assertRaises(ValueError):
# d[np.int_(0)] = ["3.14", "1"]
#
# ex = data.Instance(d.domain, ["3.16", "1", "f"])
# d[0] = ex
# almost_equal_list(d[0].values(), [3.16, "1", "f"])
#
# ex = data.Instance(d.domain, ["3.16", 2, "t"])
# d[np.int_(0)] = ex
# almost_equal_list(d[0].values(), [3.16, 2, "t"])
#
# ex = data.Instance(d.domain, ["3.16", "1", "f"])
# ex["e"] = "mmmapp"
# d[0] = ex
# almost_equal_list(d[0].values(), [3.16, "1", "f"])
# self.assertEqual(d[0, "e"], "mmmapp")
#
# def test_slice(self):
# import warnings
#
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# d = data.Table("test2")
# x = d[:3]
# self.assertEqual(len(x), 3)
# self.assertEqual([e[0] for e in x], [0, 1.1, 2.22])
#
# x = d[2:5]
# self.assertEqual(len(x), 3)
# self.assertEqual([e[0] for e in x], [2.22, 2.23, 2.24])
#
# x = d[4:1:-1]
# self.assertEqual(len(x), 3)
# self.assertEqual([e[0] for e in x], [2.24, 2.23, 2.22])
#
# x = d[-3:]
# self.assertEqual(len(x), 3)
# self.assertEqual([e[0] for e in x], [2.26, 3.333, Unknown])
#
# def test_assign_slice_value(self):
# import warnings
#
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# d = data.Table("test2")
# d[2:5, 0] = 42
# self.assertEqual([e[0] for e in d],
# [0, 1.1, 42, 42, 42, 2.25, 2.26, 3.333, Unknown])
# d[:3, "b"] = 43
# self.assertEqual([e[0] for e in d],
# [43, 43, 43, 42, 42, 2.25, 2.26, 3.333, None])
# d[-2:, d.domain[0]] = 44
# self.assertEqual([e[0] for e in d],
# [43, 43, 43, 42, 42, 2.25, 2.26, 44, 44])
#
# d[2:5, "a"] = "A"
# self.assertEqual([e["a"] for e in d], list("ABAAACCDE"))
#
# def test_del_slice_example(self):
# import warnings
#
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# d = data.Table("test2")
#
# vals = [e[0] for e in d]
#
# del d[2:2]
# self.assertEqual([e[0] for e in d], vals)
#
# del d[2:5]
# del vals[2:5]
# self.assertEqual([e[0] for e in d], vals)
#
# del d[:]
# self.assertEqual(len(d), 0)
#
# def test_set_slice_example(self):
# import warnings
#
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# d = data.Table("test2")
# d[5, 0] = 42
# d[:3] = d[5]
# self.assertEqual(d[1, 0], 42)
#
# d[5:2:-1] = [3, None, None]
# self.assertEqual([e[0] for e in d],
# [42, 42, 42, 3, 3, 3, 2.26, 3.333, None])
# self.assertTrue(isnan(d[3, 2]))
#
# d[2:5] = 42
# self.assertTrue(np.all(d.X[2:5] == 42))
# self.assertEqual(d.Y[2], 0)
#
#
# def test_multiple_indices(self):
# import warnings
#
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# d = data.Table("test2")
#
# with self.assertRaises(IndexError):
# x = d[2, 5, 1]
#
# with self.assertRaises(IndexError):
# x = d[(2, 5, 1)]
#
# x = d[[2, 5, 1]]
# self.assertEqual([e[0] for e in x], [2.22, 2.25, 1.1])
#
# def test_assign_multiple_indices_value(self):
# import warnings
#
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# d = data.Table("test2")
#
# d[1:4, "b"] = 42
# self.assertEqual([e[0] for e in d],
# [0, 42, 42, 42, 2.24, 2.25, 2.26, 3.333, None])
#
# d[range(5, 2, -1), "b"] = None
# self.assertEqual([e[d.domain[0]] for e in d],
# [0, 42, 42, None, "?", "", 2.26, 3.333, None])
#
# def test_del_multiple_indices_example(self):
# import warnings
#
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# d = data.Table("test2")
#
# vals = [e[0] for e in d]
#
# del d[[1, 5, 2]]
# del vals[5]
# del vals[2]
# del vals[1]
# self.assertEqual([e[0] for e in d], vals)
#
# del d[range(1, 3)]
# del vals[1:3]
# self.assertEqual([e[0] for e in d], vals)
#
# def test_set_multiple_indices_example(self):
# import warnings
#
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# d = data.Table("test2")
#
# vals = [e[0] for e in d]
# d[[1, 2, 5]] = [42, None, None]
# vals[1] = vals[2] = vals[5] = 42
# self.assertEqual([e[0] for e in d], vals)
#
# def test_views(self):
# d = data.Table("zoo")
# crc = d.checksum(True)
# x = d[:20]
# self.assertEqual(crc, d.checksum(True))
# del x[13]
# self.assertEqual(crc, d.checksum(True))
# del x[4:9]
# self.assertEqual(crc, d.checksum(True))
#
# def test_bool(self):
# d = data.Table("iris")
# self.assertTrue(d)
# del d[:]
# self.assertFalse(d)
#
# d = data.Table("test3")
# self.assertFalse(d)
#
# d = data.Table("iris")
# self.assertTrue(d)
# d.clear()
# self.assertFalse(d)
#
# def test_checksum(self):
# d = data.Table("zoo")
# d[42, 3] = 0
# crc1 = d.checksum(False)
# d[42, 3] = 1
# crc2 = d.checksum(False)
# self.assertNotEqual(crc1, crc2)
# d[42, 3] = 0
# crc3 = d.checksum(False)
# self.assertEqual(crc1, crc3)
# _ = d[42, "name"]
# d[42, "name"] = "non-animal"
# crc4 = d.checksum(False)
# self.assertEqual(crc1, crc4)
# crc4 = d.checksum(True)
# crc5 = d.checksum(1)
# crc6 = d.checksum(False)
# self.assertNotEqual(crc1, crc4)
# self.assertNotEqual(crc1, crc5)
# self.assertEqual(crc1, crc6)
#
# def test_total_weight(self):
# d = data.Table("zoo")
# self.assertEqual(d.total_weight(), len(d))
#
# d.set_weights(0)
# d[0].weight = 0.1
# d[10].weight = 0.2
# d[-1].weight = 0.3
# self.assertAlmostEqual(d.total_weight(), 0.6)
# del d[10]
# self.assertAlmostEqual(d.total_weight(), 0.4)
# d.clear()
# self.assertAlmostEqual(d.total_weight(), 0)
#
# def test_has_missing(self):
# d = data.Table("zoo")
# self.assertFalse(d.has_missing())
# self.assertFalse(d.has_missing_class())
#
# d[10, 3] = "?"
# self.assertTrue(d.has_missing())
# self.assertFalse(d.has_missing_class())
#
# d[10].set_class("?")
# self.assertTrue(d.has_missing())
# self.assertTrue(d.has_missing_class())
#
# d = data.Table("test3")
# self.assertFalse(d.has_missing())
# self.assertFalse(d.has_missing_class())
#
# def test_shuffle(self):
# d = data.Table("zoo")
# crc = d.checksum()
# names = set(str(x["name"]) for x in d)
#
# d.shuffle()
# self.assertNotEqual(crc, d.checksum())
# self.assertSetEqual(names, set(str(x["name"]) for x in d))
# crc2 = d.checksum()
#
# x = d[2:10]
# crcx = x.checksum()
# d.shuffle()
# self.assertNotEqual(crc2, d.checksum())
# self.assertEqual(crcx, x.checksum())
#
# crc2 = d.checksum()
# x.shuffle()
# self.assertNotEqual(crcx, x.checksum())
# self.assertEqual(crc2, d.checksum())
#
# @staticmethod
# def not_less_ex(ex1, ex2):
# for v1, v2 in zip(ex1, ex2):
# if v1 != v2:
# return v1 < v2
# return True
#
# @staticmethod
# def sorted(d):
# for i in range(1, len(d)):
# if not TableTestCase.not_less_ex(d[i - 1], d[i]):
# return False
# return True
#
# @staticmethod
# def not_less_ex_ord(ex1, ex2, ord):
# for a in ord:
# if ex1[a] != ex2[a]:
# return ex1[a] < ex2[a]
# return True
#
# @staticmethod
# def sorted_ord(d, ord):
# for i in range(1, len(d)):
# if not TableTestCase.not_less_ex_ord(d[i - 1], d[i], ord):
# return False
# return True
#
# def test_append(self):
# d = data.Table("test3")
# d.append([None] * 3)
# self.assertEqual(1, len(d))
# self.assertTrue(all(isnan(i) for i in d[0]))
#
# d.append([42, "0", None])
# self.assertEqual(2, len(d))
# self.assertEqual(d[1], [42, "0", None])
#
# def test_append2(self):
# d = data.Table("iris")
# d.shuffle()
# l1 = len(d)
# d.append([1, 2, 3, 4, 0])
# self.assertEqual(len(d), l1 + 1)
# self.assertEqual(d[-1], [1, 2, 3, 4, 0])
#
# x = data.Instance(d[10])
# d.append(x)
# self.assertEqual(d[-1], d[10])
#
# x = d[:50]
# with self.assertRaises(ValueError):
# x.append(d[50])
#
# x.ensure_copy()
# x.append(d[50])
# self.assertEqual(x[50], d[50])
#
# def test_extend(self):
# d = data.Table("iris")
# d.shuffle()
#
# x = d[:5]
# x.ensure_copy()
# d.extend(x)
# for i in range(5):
# self.assertTrue(d[i] == d[-5 + i])
#
# # Disabled to support LazyTables.
# #x = d[:5]
# #with self.assertRaises(ValueError):
# # d.extend(x)
#
# y = d[:2, 1]
# x.ensure_copy()
# x.extend(y)
# np.testing.assert_almost_equal(x[-2:, 1].X, y.X)
# self.assertEqual(np.isnan(x).sum(), 8)
#
# def test_copy(self):
# t = data.Table(np.zeros((5, 3)), np.arange(5), np.zeros((5, 3)))
#
# copy = t.copy()
# self.assertTrue(np.all(t.X == copy.X))
# self.assertTrue(np.all(t.Y == copy.Y))
# self.assertTrue(np.all(t.metas == copy.metas))
# copy[0] = [1, 1, 1, 1, 1, 1, 1, 1]
# self.assertFalse(np.all(t.X == copy.X))
# self.assertFalse(np.all(t.Y == copy.Y))
# self.assertFalse(np.all(t.metas == copy.metas))
#
# def test_concatenate(self):
# d1 = data.Domain([data.ContinuousVariable('a1')])
# t1 = data.Table.from_numpy(d1, [[1],
# [2]])
# d2 = data.Domain([data.ContinuousVariable('a2')], metas=[data.StringVariable('s')])
# t2 = data.Table.from_numpy(d2, [[3],
# [4]], metas=[['foo'],
# ['fuu']])
# self.assertRaises(ValueError, lambda: data.Table.concatenate((t1, t2), axis=5))
#
# t3 = data.Table.concatenate((t1, t2))
# self.assertEqual(t3.domain.attributes, t1.domain.attributes + t2.domain.attributes)
# self.assertEqual(len(t3.domain.metas), 1)
# self.assertEqual(t3.X.shape, (2,2))
# self.assertRaises(ValueError, lambda: data.Table.concatenate((t3, t1)))
#
# t4 = data.Table.concatenate((t3, t3), axis=0)
# np.testing.assert_equal(t4.X, [[1, 3],
# [2, 4],
# [1, 3],
# [2, 4]])
# t4 = data.Table.concatenate((t3, t1), axis=0)
# np.testing.assert_equal(t4.X, [[1, 3],
# [2, 4],
# [1, np.nan],
# [2, np.nan]])
#
#
#
# def test_convert_through_append(self):
# d = data.Table("iris")
# dom2 = data.Domain([d.domain[0], d.domain[2], d.domain[4]])
# d2 = data.Table(dom2)
# dom3 = data.Domain([d.domain[1], d.domain[2]], None)
# d3 = data.Table(dom3)
# for e in d[:5]:
# d2.append(e)
# d3.append(e)
# for e, e2, e3 in zip(d, d2, d3):
# self.assertEqual(e[0], e2[0])
# self.assertEqual(e[1], e3[0])
#
# def test_pickle(self):
# import pickle
#
# d = data.Table("zoo")
# s = pickle.dumps(d)
# d2 = pickle.loads(s)
# self.assertEqual(d[0], d2[0])
#
# self.assertEqual(d.checksum(include_metas=False),
# d2.checksum(include_metas=False))
#
# d = data.Table("iris")
# s = pickle.dumps(d)
# d2 = pickle.loads(s)
# self.assertEqual(d[0], d2[0])
# self.assertEqual(d.checksum(include_metas=False),
# d2.checksum(include_metas=False))
#
# def test_translate_through_slice(self):
# d = data.Table("iris")
# dom = data.Domain(["petal length", "sepal length", "iris"],
# source=d.domain)
# d_ref = d[:10, dom]
# self.assertEqual(d_ref.domain.class_var, d.domain.class_var)
# self.assertEqual(d_ref[0, "petal length"], d[0, "petal length"])
# self.assertEqual(d_ref[0, "sepal length"], d[0, "sepal length"])
# self.assertEqual(d_ref.X.shape, (10, 2))
# self.assertEqual(d_ref.Y.shape, (10,))
#
# def test_saveTab(self):
# d = data.Table("iris")[:3]
# d.save("test-save.tab")
# try:
# d2 = data.Table("test-save.tab")
# for e1, e2 in zip(d, d2):
# self.assertEqual(e1, e2)
# finally:
# os.remove("test-save.tab")
#
# dom = data.Domain([data.ContinuousVariable("a")])
# d = data.Table(dom)
# d += [[i] for i in range(3)]
# d.save("test-save.tab")
# try:
# d2 = data.Table("test-save.tab")
# self.assertEqual(len(d.domain.attributes), 1)
# self.assertEqual(d.domain.class_var, None)
# for i in range(3):
# self.assertEqual(d2[i], [i])
# finally:
# os.remove("test-save.tab")
#
# dom = data.Domain([data.ContinuousVariable("a")], None)
# d = data.Table(dom)
# d += [[i] for i in range(3)]
# d.save("test-save.tab")
# try:
# d2 = data.Table("test-save.tab")
# self.assertEqual(len(d.domain.attributes), 1)
# for i in range(3):
# self.assertEqual(d2[i], [i])
# finally:
# os.remove("test-save.tab")
#
# d = data.Table("zoo")
# d.save("test-zoo.tab")
# dd = data.Table("test-zoo")
#
# try:
# self.assertTupleEqual(d.domain.metas, dd.domain.metas, msg="Meta attributes don't match.")
# self.assertTupleEqual(d.domain.variables, dd.domain.variables, msg="Attributes don't match.")
#
# for i in range(10):
# for j in d.domain.variables:
# self.assertEqual(d[i][j], dd[i][j])
# finally:
# os.remove("test-zoo.tab")
#
# def test_save_pickle(self):
# table = data.Table("iris")
# try:
# table.save("iris.pickle")
# table2 = data.Table.from_file("iris.pickle")
# np.testing.assert_almost_equal(table.X, table2.X)
# np.testing.assert_almost_equal(table.Y, table2.Y)
# self.assertIs(table.domain[0], table2.domain[0])
# finally:
# os.remove("iris.pickle")
#
# def test_from_numpy(self):
# import random
#
# a = np.arange(20, dtype="d").reshape((4, 5))
# a[:, -1] = [0, 0, 0, 1]
# dom = data.Domain([data.ContinuousVariable(x) for x in "abcd"],
# data.DiscreteVariable("e", values=["no", "yes"]))
# table = data.Table(dom, a)
# for i in range(4):
# self.assertEqual(table[i].get_class(), "no" if i < 3 else "yes")
# for j in range(5):
# self.assertEqual(a[i, j], table[i, j])
# table[i, j] = random.random()
# self.assertEqual(a[i, j], table[i, j])
#
# with self.assertRaises(IndexError):
# table[0, -5] = 5
#
# def test_filter_is_defined(self):
# d = data.Table("iris")
# d[1, 4] = Unknown
# self.assertTrue(isnan(d[1, 4]))
# d[140, 0] = Unknown
# e = filter.IsDefined()(d)
# self.assertEqual(len(e), len(d) - 2)
# self.assertEqual(e[0], d[0])
# self.assertEqual(e[1], d[2])
# self.assertEqual(e[147], d[149])
# self.assertTrue(d.has_missing())
# self.assertFalse(e.has_missing())
#
# def test_filter_has_class(self):
# d = data.Table("iris")
# d[1, 4] = Unknown
# self.assertTrue(isnan(d[1, 4]))
# d[140, 0] = Unknown
# e = filter.HasClass()(d)
# self.assertEqual(len(e), len(d) - 1)
# self.assertEqual(e[0], d[0])
# self.assertEqual(e[1], d[2])
# self.assertEqual(e[148], d[149])
# self.assertTrue(d.has_missing())
# self.assertTrue(e.has_missing())
# self.assertFalse(e.has_missing_class())
#
# def test_filter_random(self):
# d = data.Table("iris")
# e = filter.Random(50)(d)
# self.assertEqual(len(e), 50)
# e = filter.Random(50, negate=True)(d)
# self.assertEqual(len(e), 100)
# for i in range(5):
# e = filter.Random(0.2)(d)
# self.assertEqual(len(e), 30)
# bc = np.bincount(np.array(e.Y[:], dtype=int))
# if min(bc) > 7:
# break
# else:
# self.fail("Filter returns too uneven distributions")
#
# def test_filter_same_value(self):
# d = data.Table("zoo")
# mind = d.domain["type"].to_val("mammal")
# lind = d.domain["legs"].to_val("4")
# gind = d.domain["name"].to_val("girl")
# for pos, val, r in (("type", "mammal", mind),
# (len(d.domain.attributes), mind, mind),
# ("legs", lind, lind),
# ("name", "girl", gind)):
# e = filter.SameValue(pos, val)(d)
# f = filter.SameValue(pos, val, negate=True)(d)
# self.assertEqual(len(e) + len(f), len(d))
# self.assertTrue(all(ex[pos] == r for ex in e))
# self.assertTrue(all(ex[pos] != r for ex in f))
def test_filter_value_continuous(self):
d = self.data1
# Slicing of LazyTables is not yet supported.
#vals = [
# r['a']
# for r in d[:3]
#]
vals = [
d[i]['a']
for i in range(3)
]
my_min = min(vals)
my_max = max(vals)
v = d.columns
# Check Between.
f = filter.FilterContinuous(v.a,
filter.FilterContinuous.Between,
min=my_min, max=my_max)
x = filter.Values([f])(d)
# Assure there is at least 1 row.
row = x[0]
# Use iteration, the natural interface to LazyTables.
for row in islice(x, 10):
self.assertTrue(my_min <= row['a'] <= my_max)
f.ref = my_min
f.oper = filter.FilterContinuous.Equal
x = filter.Values([f])(d)
row = x[0]
# Can only test 1 value, since it does not repeat.
for row in islice(x, 1):
self.assertTrue(row['a'] == my_min)
f.oper = filter.FilterContinuous.NotEqual
x = filter.Values([f])(d)
row = x[0]
for row in islice(x, 10):
self.assertTrue(row['a'] != my_min)
f.oper = filter.FilterContinuous.Less
x = filter.Values([f])(d)
row = x[0]
for row in islice(x, 10):
self.assertTrue(row['a'] < my_max)
f.oper = filter.FilterContinuous.LessEqual
x = filter.Values([f])(d)
row = x[0]
for row in islice(x, 10):
self.assertTrue(row['a'] <= my_max)
f.oper = filter.FilterContinuous.Greater
x = filter.Values([f])(d)
row = x[0]
for row in islice(x, 10):
self.assertTrue(row['a'] > my_min)
f.oper = filter.FilterContinuous.GreaterEqual
x = filter.Values([f])(d)
row = x[0]
for row in islice(x, 10):
self.assertTrue(row['a'] >= my_min)
# Check conjugation.
f1 = filter.FilterContinuous(v.a,
filter.FilterContinuous.GreaterEqual,
my_min)
f2 = filter.FilterContinuous(v.a,
filter.FilterContinuous.LessEqual,
my_max)
x = filter.Values([f1, f2])(d)
# Assure there is at least 1 row.
row = x[0]
# Use iteration, the natural interface to LazyTables.
for row in islice(x, 10):
self.assertTrue(my_min <= row['a'] <= my_max)
f.oper = filter.FilterContinuous.Outside
f.ref, f.max = my_min, my_max
x = filter.Values([f])(d)
for row in islice(x, 10):
self.assertTrue(row['a'] < my_min or row['a'] > my_max)
# Cannot test for undefined, because not supported.
# Perhaps test with LazyFile?
#f.oper = filter.FilterContinuous.IsDefined
#f.ref = f.max = None
#x = filter.Values([f])(d)
#self.assertEqual(len(x), len(d))
#
#d[:30, v.petal_length] = Unknown
#x = filter.Values([f])(d)
#self.assertEqual(len(x), len(d) - 30)
# def test_filter_value_continuous_args(self):
# d = data.Table("iris")
# col = d.X[:, 2]
# v = d.columns
#
# f = filter.FilterContinuous(v.petal_length,
# filter.FilterContinuous.Equal, ref=5.1)
# x = filter.Values([f])(d)
# self.assertTrue(np.all(x.X[:, 2] == 5.1))
# self.assertEqual(sum(col == 5.1), len(x))
#
# f = filter.FilterContinuous(2,
# filter.FilterContinuous.Equal, ref=5.1)
# self.assertTrue(np.all(x.X[:, 2] == 5.1))
# self.assertEqual(sum(col == 5.1), len(x))
#
# f = filter.FilterContinuous("petal length",
# filter.FilterContinuous.Equal, ref=5.1)
# self.assertTrue(np.all(x.X[:, 2] == 5.1))
# self.assertEqual(sum(col == 5.1), len(x))
#
# f = filter.FilterContinuous("sepal length",
# filter.FilterContinuous.Equal, ref=5.1)
# f.column = 2
# self.assertTrue(np.all(x.X[:, 2] == 5.1))
# self.assertEqual(sum(col == 5.1), len(x))
#
# f = filter.FilterContinuous("sepal length",
# filter.FilterContinuous.Equal, ref=5.1)
# f.column = v.petal_length
# self.assertTrue(np.all(x.X[:, 2] == 5.1))
# self.assertEqual(sum(col == 5.1), len(x))
#
# f = filter.FilterContinuous(v.petal_length,
# filter.FilterContinuous.Equal, ref=18)
# f.ref = 5.1
# x = filter.Values([f])(d)
# self.assertTrue(np.all(x.X[:, 2] == 5.1))
# self.assertEqual(sum(col == 5.1), len(x))
#
# f = filter.FilterContinuous(v.petal_length,
# filter.FilterContinuous.Equal, ref=18)
# f.ref = 5.1
# x = filter.Values([f])(d)
# self.assertTrue(np.all(x.X[:, 2] == 5.1))
# self.assertEqual(sum(col == 5.1), len(x))
#
# def test_valueFilter_discrete(self):
# d = data.Table("zoo")
#
# f = filter.FilterDiscrete(d.domain.class_var, values=[2, 3, 4])
# for e in filter.Values([f])(d):
# self.assertTrue(e.get_class() in [2, 3, 4])
#
# f.values = ["mammal"]
# for e in filter.Values([f])(d):
# self.assertTrue(e.get_class() == "mammal")
#
# f = filter.FilterDiscrete(d.domain.class_var, values=[2, "mammal"])
# for e in filter.Values([f])(d):
# self.assertTrue(e.get_class() in [2, "mammal"])
#
# f = filter.FilterDiscrete(d.domain.class_var, values=[2, "martian"])
# self.assertRaises(ValueError, d._filter_values, f)
#
# f = filter.FilterDiscrete(d.domain.class_var, values=[2, data.Table])
# self.assertRaises(TypeError, d._filter_values, f)
#
# v = d.columns
# f = filter.FilterDiscrete(v.hair, values=None)
# self.assertEqual(len(filter.Values([f])(d)), len(d))
#
# d[:5, v.hair] = Unknown
# self.assertEqual(len(filter.Values([f])(d)), len(d) - 5)
#
# def test_valueFilter_string_case_sens(self):
# d = data.Table("zoo")
# col = d[:, "name"].metas[:, 0]
#
# f = filter.FilterString("name",
# filter.FilterString.Equal, "girl")
# x = filter.Values([f])(d)
# self.assertEqual(len(x), 1)
# self.assertEqual(x[0, "name"], "girl")
# self.assertTrue(np.all(x.metas == "girl"))
#
# f.oper = f.NotEqual
# x = filter.Values([f])(d)
# self.assertEqual(len(x), len(d) - 1)
# self.assertTrue(np.all(x[:, "name"] != "girl"))
#
# f.oper = f.Less
# x = filter.Values([f])(d)
# self.assertEqual(len(x), sum(col < "girl"))
# self.assertTrue(np.all(x.metas < "girl"))
#
# f.oper = f.LessEqual
# x = filter.Values([f])(d)
# self.assertEqual(len(x), sum(col <= "girl"))
# self.assertTrue(np.all(x.metas <= "girl"))
#
# f.oper = f.Greater
# x = filter.Values([f])(d)
# self.assertEqual(len(x), sum(col > "girl"))
# self.assertTrue(np.all(x.metas > "girl"))
#
# f.oper = f.GreaterEqual
# x = filter.Values([f])(d)
# self.assertEqual(len(x), sum(col >= "girl"))
# self.assertTrue(np.all(x.metas >= "girl"))
#
# f.oper = f.Between
# f.max = "lion"
# x = filter.Values([f])(d)
# self.assertEqual(len(x), sum(("girl" <= col) * (col <= "lion")))
# self.assertTrue(np.all(x.metas >= "girl"))
# self.assertTrue(np.all(x.metas <= "lion"))
#
# f.oper = f.Outside
# x = filter.Values([f])(d)
# self.assertEqual(len(x), sum(col < "girl") + sum(col > "lion"))
# self.assertTrue(np.all((x.metas < "girl") + (x.metas > "lion")))
#
# f.oper = f.Contains
# f.ref = "ea"
# x = filter.Values([f])(d)
# for e in x:
# self.assertTrue("ea" in e["name"])
# self.assertEqual(len(x), len([e for e in col if "ea" in e]))
#
# f.oper = f.StartsWith
# f.ref = "sea"
# x = filter.Values([f])(d)
# for e in x:
# self.assertTrue(str(e["name"]).startswith("sea"))
# self.assertEqual(len(x), len([e for e in col if e.startswith("sea")]))
#
# f.oper = f.EndsWith
# f.ref = "ion"
# x = filter.Values([f])(d)
# for e in x:
# self.assertTrue(str(e["name"]).endswith("ion"))
# self.assertEqual(len(x), len([e for e in col if e.endswith("ion")]))
#
# def test_valueFilter_string_case_insens(self):
# d = data.Table("zoo")
# d[d[:, "name"].metas[:, 0] == "girl", "name"] = "GIrl"
#
# col = d[:, "name"].metas[:, 0]
#
# f = filter.FilterString("name",
# filter.FilterString.Equal, "giRL")
# f.case_sensitive = False
# x = filter.Values([f])(d)
# self.assertEqual(len(x), 1)
# self.assertEqual(x[0, "name"], "GIrl")
# self.assertTrue(np.all(x.metas == "GIrl"))
#
# f.oper = f.NotEqual
# x = filter.Values([f])(d)
# self.assertEqual(len(x), len(d) - 1)
# self.assertTrue(np.all(x[:, "name"] != "GIrl"))
#
# f.oper = f.Less
# f.ref = "CHiCKEN"
# x = filter.Values([f])(d)
# self.assertEqual(len(x), sum(col < "chicken") - 1) # girl!
# self.assertTrue(np.all(x.metas < "chicken"))
#
# f.oper = f.LessEqual
# x = filter.Values([f])(d)
# self.assertEqual(len(x), sum(col <= "chicken") - 1)
# self.assertTrue(np.all(x.metas <= "chicken"))
#
# f.oper = f.Greater
# x = filter.Values([f])(d)
# self.assertEqual(len(x), sum(col > "chicken") + 1)
# for e in x:
# self.assertGreater(str(e["name"]).lower(), "chicken")
#
# f.oper = f.GreaterEqual
# x = filter.Values([f])(d)
# self.assertEqual(len(x), sum(col >= "chicken") + 1)
# for e in x:
# self.assertGreaterEqual(str(e["name"]).lower(), "chicken")
#
# f.oper = f.Between
# f.max = "liOn"
# x = filter.Values([f])(d)
# self.assertEqual(len(x), sum((col >= "chicken") * (col <= "lion")) + 1)
# for e in x:
# self.assertTrue("chicken" <= str(e["name"]).lower() <= "lion")
#
# f.oper = f.Outside
# x = filter.Values([f])(d)
# self.assertEqual(len(x), sum(col < "chicken") + sum(col > "lion") - 1)
# self.assertTrue(np.all((x.metas < "chicken") + (x.metas > "lion")))
#
# f.oper = f.Contains
# f.ref = "iR"
# x = filter.Values([f])(d)
# for e in x:
# self.assertTrue("ir" in str(e["name"]).lower())
# self.assertEqual(len(x), len([e for e in col if "ir" in e]) + 1)
#
# f.oper = f.StartsWith
# f.ref = "GI"
# x = filter.Values([f])(d)
# for e in x:
# self.assertTrue(str(e["name"]).lower().startswith("gi"))
# self.assertEqual(len(x),
# len([e for e in col if e.lower().startswith("gi")]))
#
# f.oper = f.EndsWith
# f.ref = "ion"
# x = filter.Values([f])(d)
# for e in x:
# self.assertTrue(str(e["name"]).endswith("ion"))
# self.assertEqual(len(x), len([e for e in col if e.endswith("ion")]))
#
# def test_valueFilter_regex(self):
# d = data.Table("zoo")
# f = filter.FilterRegex(d.domain['name'], '^c...$')
# x = filter.Values([f])(d)
# self.assertEqual(len(x), 7)
#
# def test_table_dtypes(self):
# table = data.Table("iris")
# metas = np.hstack((table.metas, table.Y.reshape(len(table), 1)))
# attributes_metas = table.domain.metas + table.domain.class_vars
# domain_metas = data.Domain(table.domain.attributes,
# table.domain.class_vars,
# attributes_metas)
# table_metas = data.Table(domain_metas, table.X, table.Y, metas)
# new_table = data.Table(data.Domain(table_metas.domain.metas,
# table_metas.domain.metas,
# table_metas.domain.metas),
# table_metas)
# self.assertTrue(new_table.X.dtype == np.float64)
# self.assertTrue(new_table.Y.dtype == np.float64)
# self.assertTrue(new_table.metas.dtype == np.float64)
#
# # TODO Test conjunctions and disjunctions of conditions
#
#
#def column_sizes(table):
# return (len(table.domain.attributes),
# len(table.domain.class_vars),
# len(table.domain.metas))
#
#
#class TableTests(unittest.TestCase):
# attributes = ["Feature %i" % i for i in range(10)]
# class_vars = ["Class %i" % i for i in range(1)]
# metas = ["Meta %i" % i for i in range(5)]
# nrows = 10
# row_indices = (1, 5, 7, 9)
#
# data = np.random.random((nrows, len(attributes)))
# class_data = np.random.random((nrows, len(class_vars)))
# meta_data = np.random.random((nrows, len(metas)))
# weight_data = np.random.random((nrows, 1))
#
# def setUp(self):
# self.data = np.random.random((self.nrows, len(self.attributes)))
# self.class_data = np.random.random((self.nrows, len(self.class_vars)))
# if len(self.class_vars) == 1:
# self.class_data = self.class_data.flatten()
# self.meta_data = np.random.randint(0, 5, (self.nrows, len(self.metas))
# ).astype(object)
# self.weight_data = np.random.random((self.nrows, 1))
#
# def mock_domain(self, with_classes=False, with_metas=False):
# attributes = self.attributes
# class_vars = self.class_vars if with_classes else []
# metas = self.metas if with_metas else []
# variables = attributes + class_vars
# return MagicMock(data.Domain,
# attributes=attributes,
# class_vars=class_vars,
# metas=metas,
# variables=variables)
#
# def create_domain(self, attributes=(), classes=(), metas=()):
# attr_vars = [data.ContinuousVariable(name=a) if isinstance(a, str)
# else a for a in attributes]
# class_vars = [data.ContinuousVariable(name=c) if isinstance(c, str)
# else c for c in classes]
# meta_vars = [data.DiscreteVariable(name=m, values=map(str, range(5)))
# if isinstance(m, str) else m for m in metas]
#
# domain = data.Domain(attr_vars, class_vars, meta_vars)
# return domain
#
#
#class CreateEmptyTable(TableTests):
# def test_calling_new_with_no_parameters_constructs_a_new_instance(self):
# table = data.Table()
# self.assertIsInstance(table, data.Table)
#
# def test_table_has_file(self):
# table = data.Table()
# self.assertIsNone(table.__file__)
#
#
#class CreateTableWithFilename(TableTests):
# filename = "data.tab"
#
# @patch("os.path.exists", Mock(return_value=True))
# @patch("Orange.data.io.TabDelimFormat")
# def test_read_data_calls_reader(self, reader_mock):
# table_mock = Mock(data.Table)
# reader_instance = reader_mock.return_value = \
# Mock(read_file=Mock(return_value=table_mock))
#
# table = data.Table.from_file(self.filename)
#
# reader_instance.read_file.assert_called_with(self.filename, data.Table)
# self.assertEqual(table, table_mock)
#
# @patch("os.path.exists", Mock(return_value=True))
# def test_read_data_calls_reader(self):
# table_mock = Mock(data.Table)
# reader_instance = Mock(read_file=Mock(return_value=table_mock))
#
# with patch.dict(data.io.FileFormats.readers,
# {'.xlsx': lambda: reader_instance}):
# table = data.Table.from_file("test.xlsx")
#
# reader_instance.read_file.assert_called_with("test.xlsx", data.Table)
# self.assertEqual(table, table_mock)
#
# @patch("os.path.exists", Mock(return_value=False))
# def test_raises_error_if_file_does_not_exist(self):
# with self.assertRaises(IOError):
# data.Table.from_file(self.filename)
#
# @patch("os.path.exists", Mock(return_value=True))
# def test_raises_error_if_file_has_unknown_extension(self):
# with self.assertRaises(IOError):
# data.Table.from_file("file.invalid_extension")
#
# @patch("Orange.data.table.Table.from_file")
# def test_calling_new_with_string_argument_calls_read_data(self, read_data):
# data.Table(self.filename)
#
# read_data.assert_called_with(self.filename)
#
# @patch("Orange.data.table.Table.from_file")
# def test_calling_new_with_keyword_argument_filename_calls_read_data(
# self, read_data):
# data.Table(filename=self.filename)
#
# read_data.assert_called_with(self.filename)
#
#
#class CreateTableWithUrl(TableTests):
# def test_load_from_url(self):
# d1 = data.Table('iris')
# d2 = data.Table('https://raw.githubusercontent.com/biolab/orange3/master/Orange/datasets/iris.tab')
# np.testing.assert_array_equal(d1.X, d2.X)
# np.testing.assert_array_equal(d1.Y, d2.Y)
#
#
#class CreateTableWithDomain(TableTests):
# def test_creates_an_empty_table_with_given_domain(self):
# domain = self.mock_domain()
# table = data.Table.from_domain(domain)
#
# self.assertEqual(table.domain, domain)
#
# def test_creates_zero_filled_rows_in_X_if_domain_contains_attributes(self):
# domain = self.mock_domain()
# table = data.Table.from_domain(domain, self.nrows)
#
# self.assertEqual(table.X.shape, (self.nrows, len(domain.attributes)))
# self.assertFalse(table.X.any())
#
# def test_creates_zero_filled_rows_in_Y_if_domain_contains_class_vars(self):
# domain = self.mock_domain(with_classes=True)
# table = data.Table.from_domain(domain, self.nrows)
#
# if len(domain.class_vars) != 1:
# self.assertEqual(table.Y.shape,
# (self.nrows, len(domain.class_vars)))
# else:
# self.assertEqual(table.Y.shape, (self.nrows,))
# self.assertFalse(table.Y.any())
#
# def test_creates_zero_filled_rows_in_metas_if_domain_contains_metas(self):
# domain = self.mock_domain(with_metas=True)
# table = data.Table.from_domain(domain, self.nrows)
#
# self.assertEqual(table.metas.shape, (self.nrows, len(domain.metas)))
# self.assertFalse(table.metas.any())
#
# def test_creates_weights_if_weights_are_true(self):
# domain = self.mock_domain()
# table = data.Table.from_domain(domain, self.nrows, True)
#
# self.assertEqual(table.W.shape, (self.nrows, ))
#
# def test_does_not_create_weights_if_weights_are_false(self):
# domain = self.mock_domain()
# table = data.Table.from_domain(domain, self.nrows, False)
#
# self.assertEqual(table.W.shape, (self.nrows, 0))
#
# @patch("Orange.data.table.Table.from_domain")
# def test_calling_new_with_domain_calls_new_from_domain(
# self, new_from_domain):
# domain = self.mock_domain()
# data.Table(domain)
#
# new_from_domain.assert_called_with(domain)
#
#
#class CreateTableWithData(TableTests):
# def test_creates_a_table_with_given_X(self):
# # from numpy
# table = data.Table(np.array(self.data))
# self.assertIsInstance(table.domain, data.Domain)
# np.testing.assert_almost_equal(table.X, self.data)
#
# # from list
# table = data.Table(list(self.data))
# self.assertIsInstance(table.domain, data.Domain)
# np.testing.assert_almost_equal(table.X, self.data)
#
# # from tuple
# table = data.Table(tuple(self.data))
# self.assertIsInstance(table.domain, data.Domain)
# np.testing.assert_almost_equal(table.X, self.data)
#
# def test_creates_a_table_from_domain_and_list(self):
# domain = data.Domain([data.DiscreteVariable(name="a", values="mf"),
# data.ContinuousVariable(name="b")],
# data.DiscreteVariable(name="y", values="abc"))
# table = data.Table(domain, [[0, 1, 2],
# [1, 2, "?"],
# ["m", 3, "a"],
# ["?", "?", "c"]])
# self.assertIs(table.domain, domain)
# np.testing.assert_almost_equal(
# table.X, np.array([[0, 1], [1, 2], [0, 3], [np.nan, np.nan]]))
# np.testing.assert_almost_equal(table.Y, np.array([2, np.nan, 0, 2]))
#
# def test_creates_a_table_from_domain_and_list_and_weights(self):
# domain = data.Domain([data.DiscreteVariable(name="a", values="mf"),
# data.ContinuousVariable(name="b")],
# data.DiscreteVariable(name="y", values="abc"))
# table = data.Table(domain, [[0, 1, 2],
# [1, 2, "?"],
# ["m", 3, "a"],
# ["?", "?", "c"]], [1, 2, 3, 4])
# self.assertIs(table.domain, domain)
# np.testing.assert_almost_equal(
# table.X, np.array([[0, 1], [1, 2], [0, 3], [np.nan, np.nan]]))
# np.testing.assert_almost_equal(table.Y, np.array([2, np.nan, 0, 2]))
# np.testing.assert_almost_equal(table.W, np.array([1, 2, 3, 4]))
#
# def test_creates_a_table_from_domain_and_list_and_metas(self):
# metas = [data.DiscreteVariable("Meta 1", values="XYZ"),
# data.ContinuousVariable("Meta 2"),
# data.StringVariable("Meta 3")]
# domain = data.Domain([data.DiscreteVariable(name="a", values="mf"),
# data.ContinuousVariable(name="b")],
# data.DiscreteVariable(name="y", values="abc"),
# metas=metas)
# table = data.Table(domain, [[0, 1, 2, "X", 2, "bb"],
# [1, 2, "?", "Y", 1, "aa"],
# ["m", 3, "a", "Z", 3, "bb"],
# ["?", "?", "c", "X", 1, "aa"]])
# self.assertIs(table.domain, domain)
# np.testing.assert_almost_equal(
# table.X, np.array([[0, 1], [1, 2], [0, 3], [np.nan, np.nan]]))
# np.testing.assert_almost_equal(table.Y, np.array([2, np.nan, 0, 2]))
# np.testing.assert_array_equal(table.metas,
# np.array([[0, 2., "bb"],
# [1, 1., "aa"],
# [2, 3., "bb"],
# [0, 1., "aa"]],
# dtype=object))
#
# def test_creates_a_table_from_list_of_instances(self):
# table = data.Table('iris')
# new_table = data.Table(table.domain, [d for d in table])
# self.assertIs(table.domain, new_table.domain)
# np.testing.assert_almost_equal(table.X, new_table.X)
# np.testing.assert_almost_equal(table.Y, new_table.Y)
# np.testing.assert_almost_equal(table.W, new_table.W)
# self.assertEqual(table.domain, new_table.domain)
# np.testing.assert_array_equal(table.metas, new_table.metas)
#
# def test_creates_a_table_from_list_of_instances_with_metas(self):
# table = data.Table('zoo')
# new_table = data.Table(table.domain, [d for d in table])
# self.assertIs(table.domain, new_table.domain)
# np.testing.assert_almost_equal(table.X, new_table.X)
# np.testing.assert_almost_equal(table.Y, new_table.Y)
# np.testing.assert_almost_equal(table.W, new_table.W)
# self.assertEqual(table.domain, new_table.domain)
# np.testing.assert_array_equal(table.metas, new_table.metas)
#
# def test_creates_a_table_with_domain_and_given_X(self):
# domain = self.mock_domain()
#
# table = data.Table(domain, self.data)
# self.assertIsInstance(table.domain, data.Domain)
# self.assertEqual(table.domain, domain)
# np.testing.assert_almost_equal(table.X, self.data)
#
# def test_creates_a_table_with_given_X_and_Y(self):
# table = data.Table(self.data, self.class_data)
#
# self.assertIsInstance(table.domain, data.Domain)
# np.testing.assert_almost_equal(table.X, self.data)
# np.testing.assert_almost_equal(table.Y, self.class_data)
#
# def test_creates_a_table_with_given_X_Y_and_metas(self):
# table = data.Table(self.data, self.class_data, self.meta_data)
#
# self.assertIsInstance(table.domain, data.Domain)
# np.testing.assert_almost_equal(table.X, self.data)
# np.testing.assert_almost_equal(table.Y, self.class_data)
# np.testing.assert_almost_equal(table.metas, self.meta_data)
#
# def test_creates_a_discrete_class_if_Y_has_few_distinct_values(self):
# Y = np.array([float(np.random.randint(0, 2)) for i in self.data])
# table = data.Table(self.data, Y, self.meta_data)
#
# np.testing.assert_almost_equal(table.Y, Y)
# self.assertIsInstance(table.domain.class_vars[0],
# data.DiscreteVariable)
# self.assertEqual(table.domain.class_vars[0].values, ["v1", "v2"])
#
# def test_creates_a_table_with_given_domain(self):
# domain = self.mock_domain()
# table = data.Table.from_numpy(domain, self.data)
#
# self.assertEqual(table.domain, domain)
#
# def test_sets_Y_if_given(self):
# domain = self.mock_domain(with_classes=True)
# table = data.Table.from_numpy(domain, self.data, self.class_data)
#
# np.testing.assert_almost_equal(table.Y, self.class_data)
#
# def test_sets_metas_if_given(self):
# domain = self.mock_domain(with_metas=True)
# table = data.Table.from_numpy(domain, self.data, metas=self.meta_data)
#
# np.testing.assert_almost_equal(table.metas, self.meta_data)
#
# def test_sets_weights_if_given(self):
# domain = self.mock_domain()
# table = data.Table.from_numpy(domain, self.data, W=self.weight_data)
#
# np.testing.assert_almost_equal(table.W, self.weight_data)
#
# def test_splits_X_and_Y_if_given_in_same_array(self):
# joined_data = np.column_stack((self.data, self.class_data))
# domain = self.mock_domain(with_classes=True)
# table = data.Table.from_numpy(domain, joined_data)
#
# np.testing.assert_almost_equal(table.X, self.data)
# np.testing.assert_almost_equal(table.Y, self.class_data)
#
# def test_initializes_Y_metas_and_W_if_not_given(self):
# domain = self.mock_domain()
# table = data.Table.from_numpy(domain, self.data)
#
# self.assertEqual(table.Y.shape, (self.nrows, len(domain.class_vars)))
# self.assertEqual(table.metas.shape, (self.nrows, len(domain.metas)))
# self.assertEqual(table.W.shape, (self.nrows, 0))
#
# def test_raises_error_if_columns_in_domain_and_data_do_not_match(self):
# domain = self.mock_domain(with_classes=True, with_metas=True)
# ones = np.zeros((self.nrows, 1))
#
# with self.assertRaises(ValueError):
# data_ = np.hstack((self.data, ones))
# data.Table.from_numpy(domain, data_, self.class_data,
# self.meta_data)
#
# with self.assertRaises(ValueError):
# classes_ = np.hstack((self.class_data, ones))
# data.Table.from_numpy(domain, self.data, classes_,
# self.meta_data)
#
# with self.assertRaises(ValueError):
# metas_ = np.hstack((self.meta_data, ones))
# data.Table.from_numpy(domain, self.data, self.class_data,
# metas_)
#
# def test_raises_error_if_lengths_of_data_do_not_match(self):
# domain = self.mock_domain(with_classes=True, with_metas=True)
#
# with self.assertRaises(ValueError):
# data_ = np.vstack((self.data, np.zeros((1, len(self.attributes)))))
# data.Table(domain, data_, self.class_data, self.meta_data)
#
# with self.assertRaises(ValueError):
# class_data_ = np.vstack((self.class_data,
# np.zeros((1, len(self.class_vars)))))
# data.Table(domain, self.data, class_data_, self.meta_data)
#
# with self.assertRaises(ValueError):
# meta_data_ = np.vstack((self.meta_data,
# np.zeros((1, len(self.metas)))))
# data.Table(domain, self.data, self.class_data, meta_data_)
#
# @patch("Orange.data.table.Table.from_numpy")
# def test_calling_new_with_domain_and_numpy_arrays_calls_new_from_numpy(
# self, new_from_numpy):
# domain = self.mock_domain()
# data.Table(domain, self.data)
# new_from_numpy.assert_called_with(domain, self.data)
#
# domain = self.mock_domain(with_classes=True)
# data.Table(domain, self.data, self.class_data)
# new_from_numpy.assert_called_with(domain, self.data, self.class_data)
#
# domain = self.mock_domain(with_classes=True, with_metas=True)
# data.Table(domain, self.data, self.class_data, self.meta_data)
# new_from_numpy.assert_called_with(
# domain, self.data, self.class_data, self.meta_data)
#
# data.Table(domain, self.data, self.class_data,
# self.meta_data, self.weight_data)
# new_from_numpy.assert_called_with(domain, self.data, self.class_data,
# self.meta_data, self.weight_data)
#
# def test_from_numpy_reconstructable(self):
# def assert_equal(T1, T2):
# np.testing.assert_array_equal(T1.X, T2.X)
# np.testing.assert_array_equal(T1.Y, T2.Y)
# np.testing.assert_array_equal(T1.metas, T2.metas)
# np.testing.assert_array_equal(T1.W, T2.W)
#
# nullcol = np.empty((self.nrows, 0))
# domain = self.create_domain(self.attributes)
# table = data.Table(domain, self.data)
#
# table_1 = data.Table.from_numpy(
# domain, table.X, table.Y, table.metas, table.W)
# assert_equal(table, table_1)
#
# domain = self.create_domain(classes=self.class_vars)
# table = data.Table(domain, nullcol, self.class_data)
#
# table_1 = data.Table.from_numpy(
# domain, table.X, table.Y, table.metas, table.W)
# assert_equal(table, table_1)
#
# domain = self.create_domain(metas=self.metas)
# table = data.Table(domain, nullcol, nullcol, self.meta_data)
#
# table_1 = data.Table.from_numpy(
# domain, table.X, table.Y, table.metas, table.W)
# assert_equal(table, table_1)
#
#
#class CreateTableWithDomainAndTable(TableTests):
# interesting_slices = [
# slice(0, 0), # [0:0] - empty slice
# slice(1), # [:1] - only first element
# slice(1, None), # [1:] - all but first
# slice(-1, None), # [-1:] - only last element
# slice(-1), # [:-1] - all but last
# slice(None), # [:] - all elements
# slice(None, None, 2), # [::2] - even elements
# slice(None, None, -1), # [::-1]- all elements reversed
# ]
#
# row_indices = [1, 5, 6, 7]
#
# def setUp(self):
# self.domain = self.create_domain(
# self.attributes, self.class_vars, self.metas)
# self.table = data.Table(
# self.domain, self.data, self.class_data, self.meta_data)
#
# def test_creates_table_with_given_domain(self):
# new_table = data.Table.from_table(self.table.domain, self.table)
#
# self.assertIsInstance(new_table, data.Table)
# self.assertIsNot(self.table, new_table)
# self.assertEqual(new_table.domain, self.domain)
#
# def test_can_copy_table(self):
# new_table = data.Table.from_table(self.domain, self.table)
# self.assert_table_with_filter_matches(new_table, self.table)
#
# def test_can_filter_rows_with_list(self):
# for indices in ([0], [1, 5, 6, 7]):
# new_table = data.Table.from_table(
# self.domain, self.table, row_indices=indices)
# self.assert_table_with_filter_matches(
# new_table, self.table, rows=indices)
#
# def test_can_filter_row_with_slice(self):
# for slice_ in self.interesting_slices:
# new_table = data.Table.from_table(
# self.domain, self.table, row_indices=slice_)
# self.assert_table_with_filter_matches(
# new_table, self.table, rows=slice_)
#
# def test_can_use_attributes_as_new_columns(self):
# a, c, m = column_sizes(self.table)
# order = [random.randrange(a) for _ in self.domain.attributes]
# new_attributes = [self.domain.attributes[i] for i in order]
# new_domain = self.create_domain(
# new_attributes, new_attributes, new_attributes)
# new_table = data.Table.from_table(new_domain, self.table)
#
# self.assert_table_with_filter_matches(
# new_table, self.table, xcols=order, ycols=order, mcols=order)
#
# def test_can_use_class_vars_as_new_columns(self):
# a, c, m = column_sizes(self.table)
# order = [random.randrange(a, a + c) for _ in self.domain.class_vars]
# new_classes = [self.domain.class_vars[i - a] for i in order]
# new_domain = self.create_domain(new_classes, new_classes, new_classes)
# new_table = data.Table.from_table(new_domain, self.table)
#
# self.assert_table_with_filter_matches(
# new_table, self.table, xcols=order, ycols=order, mcols=order)
#
# def test_can_use_metas_as_new_columns(self):
# a, c, m = column_sizes(self.table)
# order = [random.randrange(-m + 1, 0) for _ in self.domain.metas]
# new_metas = [self.domain.metas[::-1][i] for i in order]
# new_domain = self.create_domain(new_metas, new_metas, new_metas)
# new_table = data.Table.from_table(new_domain, self.table)
#
# self.assert_table_with_filter_matches(
# new_table, self.table, xcols=order, ycols=order, mcols=order)
#
# def test_can_use_combination_of_all_as_new_columns(self):
# a, c, m = column_sizes(self.table)
# order = ([random.randrange(a) for _ in self.domain.attributes] +
# [random.randrange(a, a + c) for _ in self.domain.class_vars] +
# [random.randrange(-m + 1, 0) for _ in self.domain.metas])
# random.shuffle(order)
# vars = list(self.domain.variables) + list(self.domain.metas[::-1])
# vars = [vars[i] for i in order]
#
# new_domain = self.create_domain(vars, vars, vars)
# new_table = data.Table.from_table(new_domain, self.table)
# self.assert_table_with_filter_matches(
# new_table, self.table, xcols=order, ycols=order, mcols=order)
#
# def test_creates_table_with_given_domain_and_row_filter(self):
# a, c, m = column_sizes(self.table)
# order = ([random.randrange(a) for _ in self.domain.attributes] +
# [random.randrange(a, a + c) for _ in self.domain.class_vars] +
# [random.randrange(-m + 1, 0) for _ in self.domain.metas])
# random.shuffle(order)
# vars = list(self.domain.variables) + list(self.domain.metas[::-1])
# vars = [vars[i] for i in order]
#
# new_domain = self.create_domain(vars, vars, vars)
# new_table = data.Table.from_table(new_domain, self.table, [0])
# self.assert_table_with_filter_matches(
# new_table, self.table[:1], xcols=order, ycols=order, mcols=order)
#
# new_table = data.Table.from_table(new_domain, self.table, [2, 1, 0])
# self.assert_table_with_filter_matches(
# new_table, self.table[2::-1], xcols=order, ycols=order, mcols=order)
#
# new_table = data.Table.from_table(new_domain, self.table, [])
# self.assert_table_with_filter_matches(
# new_table, self.table[:0], xcols=order, ycols=order, mcols=order)
#
# def assert_table_with_filter_matches(
# self, new_table, old_table,
# rows=..., xcols=..., ycols=..., mcols=...):
# a, c, m = column_sizes(old_table)
# xcols = slice(a) if xcols is Ellipsis else xcols
# ycols = slice(a, a + c) if ycols is Ellipsis else ycols
# mcols = slice(None, -m - 1, -1) if mcols is Ellipsis else mcols
#
# # Indexing used by convert_domain uses positive indices for variables
# # and classes (classes come after attributes) and negative indices for
# # meta features. This is equivalent to ordinary indexing in a magic
# # table below.
# magic = np.hstack((old_table.X, old_table.Y[:, None],
# old_table.metas[:, ::-1]))
# np.testing.assert_almost_equal(new_table.X, magic[rows, xcols])
# Y = magic[rows, ycols]
# if Y.shape[1] == 1:
# Y = Y.flatten()
# np.testing.assert_almost_equal(new_table.Y, Y)
# np.testing.assert_almost_equal(new_table.metas, magic[rows, mcols])
# np.testing.assert_almost_equal(new_table.W, old_table.W[rows])
#
#
#def isspecial(s):
# return isinstance(s, slice) or s is Ellipsis
#
#
#def split_columns(indices, t):
# a, c, m = column_sizes(t)
# if indices is ...:
# return slice(a), slice(c), slice(m)
# elif isinstance(indices, slice):
# return indices, slice(0, 0), slice(0, 0)
# elif not isinstance(indices, list) and not isinstance(indices, tuple):
# indices = [indices]
# return (
# [t.domain.index(x)
# for x in indices if 0 <= t.domain.index(x) < a] or slice(0, 0),
# [t.domain.index(x) - a
# for x in indices if t.domain.index(x) >= a] or slice(0, 0),
# [-t.domain.index(x) - 1
# for x in indices if t.domain.index(x) < 0] or slice(0, 0))
#
#
#def getname(variable):
# return variable.name
#
#
#class TableIndexingTests(TableTests):
# def setUp(self):
# super().setUp()
# d = self.domain = \
# self.create_domain(self.attributes, self.class_vars, self.metas)
# t = self.table = \
# data.Table(self.domain, self.data, self.class_data, self.meta_data)
# self.magic_table = \
# np.column_stack((self.table.X, self.table.Y,
# self.table.metas[:, ::-1]))
#
# self.rows = [0, -1]
# self.multiple_rows = [slice(0, 0), ..., slice(1, -1, -1)]
# a, c, m = column_sizes(t)
# columns = [0, a - 1, a, a + c - 1, -1, -m]
# self.columns = chain(columns,
# map(lambda x: d[x], columns),
# map(lambda x: d[x].name, columns))
# self.multiple_columns = chain(
# self.multiple_rows,
# [d.attributes, d.class_vars, d.metas, [0, a, -1]],
# [self.attributes, self.class_vars, self.metas],
# [self.attributes + self.class_vars + self.metas])
#
# # TODO: indexing with [[0,1], [0,1]] produces weird results
# # TODO: what should be the results of table[1, :]
#
# def test_can_select_a_single_value(self):
# for r in self.rows:
# for c in self.columns:
# value = self.table[r, c]
# self.assertAlmostEqual(
# value, self.magic_table[r, self.domain.index(c)])
#
# value = self.table[r][c]
# self.assertAlmostEqual(
# value, self.magic_table[r, self.domain.index(c)])
#
# def test_can_select_a_single_row(self):
# for r in self.rows:
# row = self.table[r]
# new_row = np.hstack(
# (self.data[r, :],
# self.class_data[r, None]))
# np.testing.assert_almost_equal(
# np.array(list(row)), new_row)
#
#
# def test_can_select_a_subset_of_rows_and_columns(self):
# for r in self.rows:
# for c in self.multiple_columns:
# table = self.table[r, c]
#
# attr, cls, metas = split_columns(c, self.table)
# X = self.table.X[[r], attr]
# if X.ndim == 1:
# X = X.reshape(-1, len(table.domain.attributes))
# np.testing.assert_almost_equal(table.X, X)
# Y = self.table.Y[:, None][[r], cls]
# if len(Y.shape) == 1 or Y.shape[1] == 1:
# Y = Y.flatten()
# np.testing.assert_almost_equal(table.Y, Y)
# metas_ = self.table.metas[[r], metas]
# if metas_.ndim == 1:
# metas_ = metas_.reshape(-1, len(table.domain.metas))
# np.testing.assert_almost_equal(table.metas, metas_)
#
# for r in self.multiple_rows:
# for c in chain(self.columns, self.multiple_rows):
# table = self.table[r, c]
#
# attr, cls, metas = split_columns(c, self.table)
# np.testing.assert_almost_equal(table.X, self.table.X[r, attr])
# Y = self.table.Y[:, None][r, cls]
# if len(Y.shape) > 1 and Y.shape[1] == 1:
# Y = Y.flatten()
# np.testing.assert_almost_equal(table.Y, Y)
# np.testing.assert_almost_equal(table.metas,
# self.table.metas[r, metas])
#
#
#class TableElementAssignmentTest(TableTests):
# def setUp(self):
# super().setUp()
# self.domain = \
# self.create_domain(self.attributes, self.class_vars, self.metas)
# self.table = \
# data.Table(self.domain, self.data, self.class_data, self.meta_data)
#
# def test_can_assign_values(self):
# self.table[0, 0] = 42.
# self.assertAlmostEqual(self.table.X[0, 0], 42.)
#
# def test_can_assign_values_to_classes(self):
# a, c, m = column_sizes(self.table)
# self.table[0, a] = 42.
# self.assertAlmostEqual(self.table.Y[0], 42.)
#
# def test_can_assign_values_to_metas(self):
# self.table[0, -1] = 42.
# self.assertAlmostEqual(self.table.metas[0, 0], 42.)
#
# def test_can_assign_rows_to_rows(self):
# self.table[0] = self.table[1]
# np.testing.assert_almost_equal(
# self.table.X[0], self.table.X[1])
# np.testing.assert_almost_equal(
# self.table.Y[0], self.table.Y[1])
# np.testing.assert_almost_equal(
# self.table.metas[0], self.table.metas[1])
#
# def test_can_assign_lists(self):
# a, c, m = column_sizes(self.table)
# new_example = [float(i)
# for i in range(len(self.attributes + self.class_vars))]
# self.table[0] = new_example
# np.testing.assert_almost_equal(
# self.table.X[0], np.array(new_example[:a]))
# np.testing.assert_almost_equal(
# self.table.Y[0], np.array(new_example[a:]))
#
# def test_can_assign_np_array(self):
# a, c, m = column_sizes(self.table)
# new_example = \
# np.array([float(i)
# for i in range(len(self.attributes + self.class_vars))])
# self.table[0] = new_example
# np.testing.assert_almost_equal(self.table.X[0], new_example[:a])
# np.testing.assert_almost_equal(self.table.Y[0], new_example[a:])
#
#
#class InterfaceTest(unittest.TestCase):
# """Basic tests each implementation of Table should pass."""
#
# features = (
# data.ContinuousVariable(name="Continuous Feature 1"),
# data.ContinuousVariable(name="Continuous Feature 2"),
# data.DiscreteVariable(name="Discrete Feature 1", values=[0, 1]),
# data.DiscreteVariable(name="Discrete Feature 2", values=["value1", "value2"]),
# )
#
# class_vars = (
# data.ContinuousVariable(name="Continuous Class"),
# data.DiscreteVariable(name="Discrete Class")
# )
#
# feature_data = (
# (1, 0, 0, 0),
# (0, 1, 0, 0),
# (0, 0, 1, 0),
# (0, 0, 0, 1),
# )
#
# class_data = (
# (1, 0),
# (0, 1),
# (1, 0),
# (0, 1)
# )
#
# data = tuple(a + c for a, c in zip(feature_data, class_data))
#
# nrows = 4
#
# def setUp(self):
# self.domain = data.Domain(attributes=self.features, class_vars=self.class_vars)
# self.table = data.Table.from_numpy(
# self.domain,
# np.array(self.feature_data),
# np.array(self.class_data),
# )
#
# def test_len(self):
# self.assertEqual(len(self.table), self.nrows)
#
# def test_row_len(self):
# for i in range(self.nrows):
# self.assertEqual(len(self.table[i]), len(self.data[i]))
#
# def test_iteration(self):
# for row, expected_data in zip(self.table, self.data):
# self.assertEqual(tuple(row), expected_data)
#
# def test_row_indexing(self):
# for i in range(self.nrows):
# self.assertEqual(tuple(self.table[i]), self.data[i])
#
# def test_row_slicing(self):
# t = self.table[1:]
# self.assertEqual(len(t), self.nrows - 1)
#
# def test_value_indexing(self):
# for i in range(self.nrows):
# for j in range(len(self.table[i])):
# self.assertEqual(self.table[i, j], self.data[i][j])
#
# def test_row_assignment(self):
# new_value = 2.
# for i in range(self.nrows):
# new_row = [new_value] * len(self.data[i])
# self.table[i] = np.array(new_row)
# self.assertEqual(list(self.table[i]), new_row)
#
# def test_value_assignment(self):
# new_value = 0.
# for i in range(self.nrows):
# for j in range(len(self.table[i])):
# self.table[i, j] = new_value
# self.assertEqual(self.table[i, j], new_value)
#
# def test_append_rows(self):
# new_value = 2
# new_row = [new_value] * len(self.data[0])
# self.table.append(new_row)
# self.assertEqual(list(self.table[-1]), new_row)
#
# def test_insert_rows(self):
# new_value = 2
# new_row = [new_value] * len(self.data[0])
# self.table.insert(0, new_row)
# self.assertEqual(list(self.table[0]), new_row)
# for row, expected in zip(self.table[1:], self.data):
# self.assertEqual(tuple(row), expected)
#
# def test_delete_rows(self):
# for i in range(self.nrows):
# del self.table[0]
# for j in range(len(self.table)):
# self.assertEqual(tuple(self.table[j]), self.data[i + j + 1])
#
# def test_clear(self):
# self.table.clear()
# self.assertEqual(len(self.table), 0)
# for i in self.table:
# self.fail("Table should not contain any rows.")
#
#
#class TestRowInstance(unittest.TestCase):
# def test_assignment(self):
# table = data.Table("zoo")
# inst = table[2]
# self.assertIsInstance(inst, data.RowInstance)
#
# inst[1] = 0
# self.assertEqual(table[2, 1], 0)
# inst[1] = 1
# self.assertEqual(table[2, 1], 1)
#
# inst.set_class("mammal")
# self.assertEqual(table[2, len(table.domain.attributes)], "mammal")
# inst.set_class("fish")
# self.assertEqual(table[2, len(table.domain.attributes)], "fish")
#
# inst[-1] = "Foo"
# self.assertEqual(table[2, -1], "Foo")
#
# def test_iteration_with_assignment(self):
# table = data.Table("iris")
# for i, row in enumerate(table):
# row[0] = i
# np.testing.assert_array_equal(table.X[:, 0], np.arange(len(table)))
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
itsjeyd/edx-platform | lms/djangoapps/certificates/tests/tests.py | 15 | 6170 | """
Tests for the certificates models.
"""
from ddt import ddt, data, unpack
from mock import patch
from django.conf import settings
from nose.plugins.attrib import attr
from badges.tests.factories import CourseCompleteImageConfigurationFactory
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from certificates.models import (
CertificateStatuses,
GeneratedCertificate,
certificate_status_for_student,
certificate_info_for_user
)
from certificates.tests.factories import GeneratedCertificateFactory
from util.milestones_helpers import (
set_prerequisite_courses,
milestones_achieved_by_user,
)
from milestones.tests.utils import MilestonesTestCaseMixin
@attr(shard=1)
@ddt
class CertificatesModelTest(ModuleStoreTestCase, MilestonesTestCaseMixin):
"""
Tests for the GeneratedCertificate model
"""
def test_certificate_status_for_student(self):
student = UserFactory()
course = CourseFactory.create(org='edx', number='verified', display_name='Verified Course')
certificate_status = certificate_status_for_student(student, course.id)
self.assertEqual(certificate_status['status'], CertificateStatuses.unavailable)
self.assertEqual(certificate_status['mode'], GeneratedCertificate.MODES.honor)
@unpack
@data(
{'allow_certificate': False, 'whitelisted': False, 'grade': None, 'output': ['N', 'N', 'N/A']},
{'allow_certificate': True, 'whitelisted': True, 'grade': None, 'output': ['Y', 'N', 'N/A']},
{'allow_certificate': True, 'whitelisted': False, 'grade': 0.9, 'output': ['Y', 'N', 'N/A']},
{'allow_certificate': False, 'whitelisted': True, 'grade': 0.8, 'output': ['N', 'N', 'N/A']},
{'allow_certificate': False, 'whitelisted': None, 'grade': 0.8, 'output': ['N', 'N', 'N/A']}
)
def test_certificate_info_for_user(self, allow_certificate, whitelisted, grade, output):
"""
Verify that certificate_info_for_user works.
"""
student = UserFactory()
course = CourseFactory.create(org='edx', number='verified', display_name='Verified Course')
student.profile.allow_certificate = allow_certificate
student.profile.save()
certificate_info = certificate_info_for_user(student, course.id, grade, whitelisted)
self.assertEqual(certificate_info, output)
@unpack
@data(
{'allow_certificate': False, 'whitelisted': False, 'grade': None, 'output': ['N', 'Y', 'honor']},
{'allow_certificate': True, 'whitelisted': True, 'grade': None, 'output': ['Y', 'Y', 'honor']},
{'allow_certificate': True, 'whitelisted': False, 'grade': 0.9, 'output': ['Y', 'Y', 'honor']},
{'allow_certificate': False, 'whitelisted': True, 'grade': 0.8, 'output': ['N', 'Y', 'honor']},
{'allow_certificate': False, 'whitelisted': None, 'grade': 0.8, 'output': ['N', 'Y', 'honor']}
)
def test_certificate_info_for_user_when_grade_changes(self, allow_certificate, whitelisted, grade, output):
"""
Verify that certificate_info_for_user works as expect in scenario when grading of problems
changes after certificates already generated. In such scenario `Certificate delivered` should not depend
on student's eligibility to get certificates since in above scenario eligibility can change over period
of time.
"""
student = UserFactory()
course = CourseFactory.create(org='edx', number='verified', display_name='Verified Course')
student.profile.allow_certificate = allow_certificate
student.profile.save()
GeneratedCertificateFactory.create(
user=student,
course_id=course.id,
status=CertificateStatuses.downloadable,
mode='honor'
)
certificate_info = certificate_info_for_user(student, course.id, grade, whitelisted)
self.assertEqual(certificate_info, output)
@patch.dict(settings.FEATURES, {'ENABLE_PREREQUISITE_COURSES': True})
def test_course_milestone_collected(self):
student = UserFactory()
course = CourseFactory.create(org='edx', number='998', display_name='Test Course')
pre_requisite_course = CourseFactory.create(org='edx', number='999', display_name='Pre requisite Course')
# set pre-requisite course
set_prerequisite_courses(course.id, [unicode(pre_requisite_course.id)])
# get milestones collected by user before completing the pre-requisite course
completed_milestones = milestones_achieved_by_user(student, unicode(pre_requisite_course.id))
self.assertEqual(len(completed_milestones), 0)
GeneratedCertificateFactory.create(
user=student,
course_id=pre_requisite_course.id,
status=CertificateStatuses.generating,
mode='verified'
)
# get milestones collected by user after user has completed the pre-requisite course
completed_milestones = milestones_achieved_by_user(student, unicode(pre_requisite_course.id))
self.assertEqual(len(completed_milestones), 1)
self.assertEqual(completed_milestones[0]['namespace'], unicode(pre_requisite_course.id))
@patch.dict(settings.FEATURES, {'ENABLE_OPENBADGES': True})
@patch('badges.backends.badgr.BadgrBackend', spec=True)
def test_badge_callback(self, handler):
student = UserFactory()
course = CourseFactory.create(org='edx', number='998', display_name='Test Course', issue_badges=True)
CourseCompleteImageConfigurationFactory()
CourseEnrollmentFactory(user=student, course_id=course.location.course_key, mode='honor')
cert = GeneratedCertificateFactory.create(
user=student,
course_id=course.id,
status=CertificateStatuses.generating,
mode='verified'
)
cert.status = CertificateStatuses.downloadable
cert.save()
self.assertTrue(handler.return_value.award.called)
| agpl-3.0 |
arnedesmedt/dotfiles | .config/sublime-text-3/Packages.symlinkfollow/pygments/all/pygments/unistring.py | 51 | 51150 | # -*- coding: utf-8 -*-
"""
pygments.unistring
~~~~~~~~~~~~~~~~~~
Strings of all Unicode characters of a certain category.
Used for matching in Unicode-aware languages. Run to regenerate.
Inspired by chartypes_create.py from the MoinMoin project.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
# Unicode general category Cc ("Other, control"): the C0 and C1 control codes.
Cc = u'\x00-\x1f\x7f-\x9f'
# Unicode general category Cf ("Other, format"): invisible formatting
# characters such as the soft hyphen, directional marks, and the BOM.
Cf = u'\xad\u0600-\u0604\u061c\u06dd\u070f\u180e\u200b-\u200f\u202a-\u202e\u2060-\u2064\u2066-\u206f\ufeff\ufff9-\ufffb'
Cn = u'\u0378-\u0379\u037f-\u0383\u038b\u038d\u03a2\u0528-\u0530\u0557-\u0558\u0560\u0588\u058b-\u058e\u0590\u05c8-\u05cf\u05eb-\u05ef\u05f5-\u05ff\u0605\u061d\u070e\u074b-\u074c\u07b2-\u07bf\u07fb-\u07ff\u082e-\u082f\u083f\u085c-\u085d\u085f-\u089f\u08a1\u08ad-\u08e3\u08ff\u0978\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09c5-\u09c6\u09c9-\u09ca\u09cf-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09fc-\u0a00\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a50\u0a52-\u0a58\u0a5d\u0a5f-\u0a65\u0a76-\u0a80\u0a84\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0acf\u0ad1-\u0adf\u0ae4-\u0ae5\u0af2-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34\u0b3a-\u0b3b\u0b45-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b64-\u0b65\u0b78-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bcf\u0bd1-\u0bd6\u0bd8-\u0be5\u0bfb-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3c\u0c45\u0c49\u0c4e-\u0c54\u0c57\u0c5a-\u0c5f\u0c64-\u0c65\u0c70-\u0c77\u0c80-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbb\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce4-\u0ce5\u0cf0\u0cf3-\u0d01\u0d04\u0d0d\u0d11\u0d3b-\u0d3c\u0d45\u0d49\u0d4f-\u0d56\u0d58-\u0d5f\u0d64-\u0d65\u0d76-\u0d78\u0d80-\u0d81\u0d84\u0d97-\u0d99\u0db2\u0dbc\u0dbe-\u0dbf\u0dc7-\u0dc9\u0dcb-\u0dce\u0dd5\u0dd7\u0de0-\u0df1\u0df5-\u0e00\u0e3b-\u0e3e\u0e5c-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0edb\u0ee0-\u0eff\u0f48\u0f6d-\u0f70\u0f98\u0fbd\u0fcd\u0fdb-\u0fff\u10c6\u10c8-\u10cc\u10ce-\u10cf\u1249\u124e-\u124f\u1257\u1259\u125e-\u125f\u1289\u128e-\u128f\u12b1\u12b6-\u12b7\u12bf\u12c1\u12c6-\u12c7\u12d7\u1311\u1316-\u1317\u135b-\u135c\u137d-\u137f\u139a-\u139f\u13f5-\u13ff\u169d-\u169f\u16f1-\u16ff\u170d\u1715-\u171f\u1737
-\u173f\u1754-\u175f\u176d\u1771\u1774-\u177f\u17de-\u17df\u17ea-\u17ef\u17fa-\u17ff\u180f\u181a-\u181f\u1878-\u187f\u18ab-\u18af\u18f6-\u18ff\u191d-\u191f\u192c-\u192f\u193c-\u193f\u1941-\u1943\u196e-\u196f\u1975-\u197f\u19ac-\u19af\u19ca-\u19cf\u19db-\u19dd\u1a1c-\u1a1d\u1a5f\u1a7d-\u1a7e\u1a8a-\u1a8f\u1a9a-\u1a9f\u1aae-\u1aff\u1b4c-\u1b4f\u1b7d-\u1b7f\u1bf4-\u1bfb\u1c38-\u1c3a\u1c4a-\u1c4c\u1c80-\u1cbf\u1cc8-\u1ccf\u1cf7-\u1cff\u1de7-\u1dfb\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fc5\u1fd4-\u1fd5\u1fdc\u1ff0-\u1ff1\u1ff5\u1fff\u2065\u2072-\u2073\u208f\u209d-\u209f\u20bb-\u20cf\u20f1-\u20ff\u218a-\u218f\u23f4-\u23ff\u2427-\u243f\u244b-\u245f\u2700\u2b4d-\u2b4f\u2b5a-\u2bff\u2c2f\u2c5f\u2cf4-\u2cf8\u2d26\u2d28-\u2d2c\u2d2e-\u2d2f\u2d68-\u2d6e\u2d71-\u2d7e\u2d97-\u2d9f\u2da7\u2daf\u2db7\u2dbf\u2dc7\u2dcf\u2dd7\u2ddf\u2e3c-\u2e7f\u2e9a\u2ef4-\u2eff\u2fd6-\u2fef\u2ffc-\u2fff\u3040\u3097-\u3098\u3100-\u3104\u312e-\u3130\u318f\u31bb-\u31bf\u31e4-\u31ef\u321f\u32ff\u4db6-\u4dbf\u9fcd-\u9fff\ua48d-\ua48f\ua4c7-\ua4cf\ua62c-\ua63f\ua698-\ua69e\ua6f8-\ua6ff\ua78f\ua794-\ua79f\ua7ab-\ua7f7\ua82c-\ua82f\ua83a-\ua83f\ua878-\ua87f\ua8c5-\ua8cd\ua8da-\ua8df\ua8fc-\ua8ff\ua954-\ua95e\ua97d-\ua97f\ua9ce\ua9da-\ua9dd\ua9e0-\ua9ff\uaa37-\uaa3f\uaa4e-\uaa4f\uaa5a-\uaa5b\uaa7c-\uaa7f\uaac3-\uaada\uaaf7-\uab00\uab07-\uab08\uab0f-\uab10\uab17-\uab1f\uab27\uab2f-\uabbf\uabee-\uabef\uabfa-\uabff\ud7a4-\ud7af\ud7c7-\ud7ca\ud7fc-\ud7ff\ufa6e-\ufa6f\ufada-\ufaff\ufb07-\ufb12\ufb18-\ufb1c\ufb37\ufb3d\ufb3f\ufb42\ufb45\ufbc2-\ufbd2\ufd40-\ufd4f\ufd90-\ufd91\ufdc8-\ufdef\ufdfe-\ufdff\ufe1a-\ufe1f\ufe27-\ufe2f\ufe53\ufe67\ufe6c-\ufe6f\ufe75\ufefd-\ufefe\uff00\uffbf-\uffc1\uffc8-\uffc9\uffd0-\uffd1\uffd8-\uffd9\uffdd-\uffdf\uffe7\uffef-\ufff8\ufffe-\uffff'
Co = u'\ue000-\uf8ff'
# Unicode general category Cs ("Other, surrogate"). The range is built via
# eval() of a raw string so this source file itself never contains a raw
# isolated surrogate literal, which some interpreters reject at parse time.
try:
    Cs = eval(r"u'\ud800-\udbff\\\udc00\udc01-\udfff'")
except UnicodeDecodeError:
    Cs = '' # Jython can't handle isolated surrogates
Ll = u'a-z\xb5\xdf-\xf6\xf8-\xff\u0101\u0103\u0105\u0107\u0109\u010b\u010d\u010f\u0111\u0113\u0115\u0117\u0119\u011b\u011d\u011f\u0121\u0123\u0125\u0127\u0129\u012b\u012d\u012f\u0131\u0133\u0135\u0137-\u0138\u013a\u013c\u013e\u0140\u0142\u0144\u0146\u0148-\u0149\u014b\u014d\u014f\u0151\u0153\u0155\u0157\u0159\u015b\u015d\u015f\u0161\u0163\u0165\u0167\u0169\u016b\u016d\u016f\u0171\u0173\u0175\u0177\u017a\u017c\u017e-\u0180\u0183\u0185\u0188\u018c-\u018d\u0192\u0195\u0199-\u019b\u019e\u01a1\u01a3\u01a5\u01a8\u01aa-\u01ab\u01ad\u01b0\u01b4\u01b6\u01b9-\u01ba\u01bd-\u01bf\u01c6\u01c9\u01cc\u01ce\u01d0\u01d2\u01d4\u01d6\u01d8\u01da\u01dc-\u01dd\u01df\u01e1\u01e3\u01e5\u01e7\u01e9\u01eb\u01ed\u01ef-\u01f0\u01f3\u01f5\u01f9\u01fb\u01fd\u01ff\u0201\u0203\u0205\u0207\u0209\u020b\u020d\u020f\u0211\u0213\u0215\u0217\u0219\u021b\u021d\u021f\u0221\u0223\u0225\u0227\u0229\u022b\u022d\u022f\u0231\u0233-\u0239\u023c\u023f-\u0240\u0242\u0247\u0249\u024b\u024d\u024f-\u0293\u0295-\u02af\u0371\u0373\u0377\u037b-\u037d\u0390\u03ac-\u03ce\u03d0-\u03d1\u03d5-\u03d7\u03d9\u03db\u03dd\u03df\u03e1\u03e3\u03e5\u03e7\u03e9\u03eb\u03ed\u03ef-\u03f3\u03f5\u03f8\u03fb-\u03fc\u0430-\u045f\u0461\u0463\u0465\u0467\u0469\u046b\u046d\u046f\u0471\u0473\u0475\u0477\u0479\u047b\u047d\u047f\u0481\u048b\u048d\u048f\u0491\u0493\u0495\u0497\u0499\u049b\u049d\u049f\u04a1\u04a3\u04a5\u04a7\u04a9\u04ab\u04ad\u04af\u04b1\u04b3\u04b5\u04b7\u04b9\u04bb\u04bd\u04bf\u04c2\u04c4\u04c6\u04c8\u04ca\u04cc\u04ce-\u04cf\u04d1\u04d3\u04d5\u04d7\u04d9\u04db\u04dd\u04df\u04e1\u04e3\u04e5\u04e7\u04e9\u04eb\u04ed\u04ef\u04f1\u04f3\u04f5\u04f7\u04f9\u04fb\u04fd\u04ff\u0501\u0503\u0505\u0507\u0509\u050b\u050d\u050f\u0511\u0513\u0515\u0517\u0519\u051b\u051d\u051f\u0521\u0523\u0525\u0527\u0561-\u0587\u1d00-\u1d2b\u1d6b-\u1d77\u1d79-\u1d9a\u1e01\u1e03\u1e05\u1e07\u1e09\u1e0b\u1e0d\u1e0f\u1e11\u1e13\u1e15\u1e17\u1e19\u1e1b\u1e1d\u1e1f\u1e21\u1e23\u1e25\u1e27\u1e29\u1e2b\u1e2d\u1e2f\u1e31\u1e33\u1e35\u1e37\u1e39\u1e3b\u1e3d\u1e3f\u1e
41\u1e43\u1e45\u1e47\u1e49\u1e4b\u1e4d\u1e4f\u1e51\u1e53\u1e55\u1e57\u1e59\u1e5b\u1e5d\u1e5f\u1e61\u1e63\u1e65\u1e67\u1e69\u1e6b\u1e6d\u1e6f\u1e71\u1e73\u1e75\u1e77\u1e79\u1e7b\u1e7d\u1e7f\u1e81\u1e83\u1e85\u1e87\u1e89\u1e8b\u1e8d\u1e8f\u1e91\u1e93\u1e95-\u1e9d\u1e9f\u1ea1\u1ea3\u1ea5\u1ea7\u1ea9\u1eab\u1ead\u1eaf\u1eb1\u1eb3\u1eb5\u1eb7\u1eb9\u1ebb\u1ebd\u1ebf\u1ec1\u1ec3\u1ec5\u1ec7\u1ec9\u1ecb\u1ecd\u1ecf\u1ed1\u1ed3\u1ed5\u1ed7\u1ed9\u1edb\u1edd\u1edf\u1ee1\u1ee3\u1ee5\u1ee7\u1ee9\u1eeb\u1eed\u1eef\u1ef1\u1ef3\u1ef5\u1ef7\u1ef9\u1efb\u1efd\u1eff-\u1f07\u1f10-\u1f15\u1f20-\u1f27\u1f30-\u1f37\u1f40-\u1f45\u1f50-\u1f57\u1f60-\u1f67\u1f70-\u1f7d\u1f80-\u1f87\u1f90-\u1f97\u1fa0-\u1fa7\u1fb0-\u1fb4\u1fb6-\u1fb7\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fc7\u1fd0-\u1fd3\u1fd6-\u1fd7\u1fe0-\u1fe7\u1ff2-\u1ff4\u1ff6-\u1ff7\u210a\u210e-\u210f\u2113\u212f\u2134\u2139\u213c-\u213d\u2146-\u2149\u214e\u2184\u2c30-\u2c5e\u2c61\u2c65-\u2c66\u2c68\u2c6a\u2c6c\u2c71\u2c73-\u2c74\u2c76-\u2c7b\u2c81\u2c83\u2c85\u2c87\u2c89\u2c8b\u2c8d\u2c8f\u2c91\u2c93\u2c95\u2c97\u2c99\u2c9b\u2c9d\u2c9f\u2ca1\u2ca3\u2ca5\u2ca7\u2ca9\u2cab\u2cad\u2caf\u2cb1\u2cb3\u2cb5\u2cb7\u2cb9\u2cbb\u2cbd\u2cbf\u2cc1\u2cc3\u2cc5\u2cc7\u2cc9\u2ccb\u2ccd\u2ccf\u2cd1\u2cd3\u2cd5\u2cd7\u2cd9\u2cdb\u2cdd\u2cdf\u2ce1\u2ce3-\u2ce4\u2cec\u2cee\u2cf3\u2d00-\u2d25\u2d27\u2d2d\ua641\ua643\ua645\ua647\ua649\ua64b\ua64d\ua64f\ua651\ua653\ua655\ua657\ua659\ua65b\ua65d\ua65f\ua661\ua663\ua665\ua667\ua669\ua66b\ua66d\ua681\ua683\ua685\ua687\ua689\ua68b\ua68d\ua68f\ua691\ua693\ua695\ua697\ua723\ua725\ua727\ua729\ua72b\ua72d\ua72f-\ua731\ua733\ua735\ua737\ua739\ua73b\ua73d\ua73f\ua741\ua743\ua745\ua747\ua749\ua74b\ua74d\ua74f\ua751\ua753\ua755\ua757\ua759\ua75b\ua75d\ua75f\ua761\ua763\ua765\ua767\ua769\ua76b\ua76d\ua76f\ua771-\ua778\ua77a\ua77c\ua77f\ua781\ua783\ua785\ua787\ua78c\ua78e\ua791\ua793\ua7a1\ua7a3\ua7a5\ua7a7\ua7a9\ua7fa\ufb00-\ufb06\ufb13-\ufb17\uff41-\uff5a'
Lm = u'\u02b0-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0374\u037a\u0559\u0640\u06e5-\u06e6\u07f4-\u07f5\u07fa\u081a\u0824\u0828\u0971\u0e46\u0ec6\u10fc\u17d7\u1843\u1aa7\u1c78-\u1c7d\u1d2c-\u1d6a\u1d78\u1d9b-\u1dbf\u2071\u207f\u2090-\u209c\u2c7c-\u2c7d\u2d6f\u2e2f\u3005\u3031-\u3035\u303b\u309d-\u309e\u30fc-\u30fe\ua015\ua4f8-\ua4fd\ua60c\ua67f\ua717-\ua71f\ua770\ua788\ua7f8-\ua7f9\ua9cf\uaa70\uaadd\uaaf3-\uaaf4\uff70\uff9e-\uff9f'
Lo = u'\xaa\xba\u01bb\u01c0-\u01c3\u0294\u05d0-\u05ea\u05f0-\u05f2\u0620-\u063f\u0641-\u064a\u066e-\u066f\u0671-\u06d3\u06d5\u06ee-\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u0800-\u0815\u0840-\u0858\u08a0\u08a2-\u08ac\u0904-\u0939\u093d\u0950\u0958-\u0961\u0972-\u0977\u0979-\u097f\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc-\u09dd\u09df-\u09e1\u09f0-\u09f1\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0-\u0ae1\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3d\u0b5c-\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c33\u0c35-\u0c39\u0c3d\u0c58-\u0c59\u0c60-\u0c61\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0-\u0ce1\u0cf1-\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d60-\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32-\u0e33\u0e40-\u0e45\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb0\u0eb2-\u0eb3\u0ebd\u0ec0-\u0ec4\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070\u1075-\u1081\u108e\u10d0-\u10fa\u10fd-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f4\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17dc\u1820-\u1842\u1844-\u1877\u1880-\u
18a8\u18aa\u18b0-\u18f5\u1900-\u191c\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19c1-\u19c7\u1a00-\u1a16\u1a20-\u1a54\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae-\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c77\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5-\u1cf6\u2135-\u2138\u2d30-\u2d67\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u3006\u303c\u3041-\u3096\u309f\u30a1-\u30fa\u30ff\u3105-\u312d\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fcc\ua000-\ua014\ua016-\ua48c\ua4d0-\ua4f7\ua500-\ua60b\ua610-\ua61f\ua62a-\ua62b\ua66e\ua6a0-\ua6e5\ua7fb-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa6f\uaa71-\uaa76\uaa7a\uaa80-\uaaaf\uaab1\uaab5-\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadc\uaae0-\uaaea\uaaf2\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff66-\uff6f\uff71-\uff9d\uffa0-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc'
Lt = u'\u01c5\u01c8\u01cb\u01f2\u1f88-\u1f8f\u1f98-\u1f9f\u1fa8-\u1faf\u1fbc\u1fcc\u1ffc'
Lu = u'A-Z\xc0-\xd6\xd8-\xde\u0100\u0102\u0104\u0106\u0108\u010a\u010c\u010e\u0110\u0112\u0114\u0116\u0118\u011a\u011c\u011e\u0120\u0122\u0124\u0126\u0128\u012a\u012c\u012e\u0130\u0132\u0134\u0136\u0139\u013b\u013d\u013f\u0141\u0143\u0145\u0147\u014a\u014c\u014e\u0150\u0152\u0154\u0156\u0158\u015a\u015c\u015e\u0160\u0162\u0164\u0166\u0168\u016a\u016c\u016e\u0170\u0172\u0174\u0176\u0178-\u0179\u017b\u017d\u0181-\u0182\u0184\u0186-\u0187\u0189-\u018b\u018e-\u0191\u0193-\u0194\u0196-\u0198\u019c-\u019d\u019f-\u01a0\u01a2\u01a4\u01a6-\u01a7\u01a9\u01ac\u01ae-\u01af\u01b1-\u01b3\u01b5\u01b7-\u01b8\u01bc\u01c4\u01c7\u01ca\u01cd\u01cf\u01d1\u01d3\u01d5\u01d7\u01d9\u01db\u01de\u01e0\u01e2\u01e4\u01e6\u01e8\u01ea\u01ec\u01ee\u01f1\u01f4\u01f6-\u01f8\u01fa\u01fc\u01fe\u0200\u0202\u0204\u0206\u0208\u020a\u020c\u020e\u0210\u0212\u0214\u0216\u0218\u021a\u021c\u021e\u0220\u0222\u0224\u0226\u0228\u022a\u022c\u022e\u0230\u0232\u023a-\u023b\u023d-\u023e\u0241\u0243-\u0246\u0248\u024a\u024c\u024e\u0370\u0372\u0376\u0386\u0388-\u038a\u038c\u038e-\u038f\u0391-\u03a1\u03a3-\u03ab\u03cf\u03d2-\u03d4\u03d8\u03da\u03dc\u03de\u03e0\u03e2\u03e4\u03e6\u03e8\u03ea\u03ec\u03ee\u03f4\u03f7\u03f9-\u03fa\u03fd-\u042f\u0460\u0462\u0464\u0466\u0468\u046a\u046c\u046e\u0470\u0472\u0474\u0476\u0478\u047a\u047c\u047e\u0480\u048a\u048c\u048e\u0490\u0492\u0494\u0496\u0498\u049a\u049c\u049e\u04a0\u04a2\u04a4\u04a6\u04a8\u04aa\u04ac\u04ae\u04b0\u04b2\u04b4\u04b6\u04b8\u04ba\u04bc\u04be\u04c0-\u04c1\u04c3\u04c5\u04c7\u04c9\u04cb\u04cd\u04d0\u04d2\u04d4\u04d6\u04d8\u04da\u04dc\u04de\u04e0\u04e2\u04e4\u04e6\u04e8\u04ea\u04ec\u04ee\u04f0\u04f2\u04f4\u04f6\u04f8\u04fa\u04fc\u04fe\u0500\u0502\u0504\u0506\u0508\u050a\u050c\u050e\u0510\u0512\u0514\u0516\u0518\u051a\u051c\u051e\u0520\u0522\u0524\u0526\u0531-\u0556\u10a0-\u10c5\u10c7\u10cd\u1e00\u1e02\u1e04\u1e06\u1e08\u1e0a\u1e0c\u1e0e\u1e10\u1e12\u1e14\u1e16\u1e18\u1e1a\u1e1c\u1e1e\u1e20\u1e22\u1e24\u1e26\u1e28\u1e2a\u1e2c\u1e2e\u1e30\u1e32\u1e34\u1e36\u1e38\u1e3a\
u1e3c\u1e3e\u1e40\u1e42\u1e44\u1e46\u1e48\u1e4a\u1e4c\u1e4e\u1e50\u1e52\u1e54\u1e56\u1e58\u1e5a\u1e5c\u1e5e\u1e60\u1e62\u1e64\u1e66\u1e68\u1e6a\u1e6c\u1e6e\u1e70\u1e72\u1e74\u1e76\u1e78\u1e7a\u1e7c\u1e7e\u1e80\u1e82\u1e84\u1e86\u1e88\u1e8a\u1e8c\u1e8e\u1e90\u1e92\u1e94\u1e9e\u1ea0\u1ea2\u1ea4\u1ea6\u1ea8\u1eaa\u1eac\u1eae\u1eb0\u1eb2\u1eb4\u1eb6\u1eb8\u1eba\u1ebc\u1ebe\u1ec0\u1ec2\u1ec4\u1ec6\u1ec8\u1eca\u1ecc\u1ece\u1ed0\u1ed2\u1ed4\u1ed6\u1ed8\u1eda\u1edc\u1ede\u1ee0\u1ee2\u1ee4\u1ee6\u1ee8\u1eea\u1eec\u1eee\u1ef0\u1ef2\u1ef4\u1ef6\u1ef8\u1efa\u1efc\u1efe\u1f08-\u1f0f\u1f18-\u1f1d\u1f28-\u1f2f\u1f38-\u1f3f\u1f48-\u1f4d\u1f59\u1f5b\u1f5d\u1f5f\u1f68-\u1f6f\u1fb8-\u1fbb\u1fc8-\u1fcb\u1fd8-\u1fdb\u1fe8-\u1fec\u1ff8-\u1ffb\u2102\u2107\u210b-\u210d\u2110-\u2112\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u2130-\u2133\u213e-\u213f\u2145\u2183\u2c00-\u2c2e\u2c60\u2c62-\u2c64\u2c67\u2c69\u2c6b\u2c6d-\u2c70\u2c72\u2c75\u2c7e-\u2c80\u2c82\u2c84\u2c86\u2c88\u2c8a\u2c8c\u2c8e\u2c90\u2c92\u2c94\u2c96\u2c98\u2c9a\u2c9c\u2c9e\u2ca0\u2ca2\u2ca4\u2ca6\u2ca8\u2caa\u2cac\u2cae\u2cb0\u2cb2\u2cb4\u2cb6\u2cb8\u2cba\u2cbc\u2cbe\u2cc0\u2cc2\u2cc4\u2cc6\u2cc8\u2cca\u2ccc\u2cce\u2cd0\u2cd2\u2cd4\u2cd6\u2cd8\u2cda\u2cdc\u2cde\u2ce0\u2ce2\u2ceb\u2ced\u2cf2\ua640\ua642\ua644\ua646\ua648\ua64a\ua64c\ua64e\ua650\ua652\ua654\ua656\ua658\ua65a\ua65c\ua65e\ua660\ua662\ua664\ua666\ua668\ua66a\ua66c\ua680\ua682\ua684\ua686\ua688\ua68a\ua68c\ua68e\ua690\ua692\ua694\ua696\ua722\ua724\ua726\ua728\ua72a\ua72c\ua72e\ua732\ua734\ua736\ua738\ua73a\ua73c\ua73e\ua740\ua742\ua744\ua746\ua748\ua74a\ua74c\ua74e\ua750\ua752\ua754\ua756\ua758\ua75a\ua75c\ua75e\ua760\ua762\ua764\ua766\ua768\ua76a\ua76c\ua76e\ua779\ua77b\ua77d-\ua77e\ua780\ua782\ua784\ua786\ua78b\ua78d\ua790\ua792\ua7a0\ua7a2\ua7a4\ua7a6\ua7a8\ua7aa\uff21-\uff3a'
Mc = u'\u0903\u093b\u093e-\u0940\u0949-\u094c\u094e-\u094f\u0982-\u0983\u09be-\u09c0\u09c7-\u09c8\u09cb-\u09cc\u09d7\u0a03\u0a3e-\u0a40\u0a83\u0abe-\u0ac0\u0ac9\u0acb-\u0acc\u0b02-\u0b03\u0b3e\u0b40\u0b47-\u0b48\u0b4b-\u0b4c\u0b57\u0bbe-\u0bbf\u0bc1-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcc\u0bd7\u0c01-\u0c03\u0c41-\u0c44\u0c82-\u0c83\u0cbe\u0cc0-\u0cc4\u0cc7-\u0cc8\u0cca-\u0ccb\u0cd5-\u0cd6\u0d02-\u0d03\u0d3e-\u0d40\u0d46-\u0d48\u0d4a-\u0d4c\u0d57\u0d82-\u0d83\u0dcf-\u0dd1\u0dd8-\u0ddf\u0df2-\u0df3\u0f3e-\u0f3f\u0f7f\u102b-\u102c\u1031\u1038\u103b-\u103c\u1056-\u1057\u1062-\u1064\u1067-\u106d\u1083-\u1084\u1087-\u108c\u108f\u109a-\u109c\u17b6\u17be-\u17c5\u17c7-\u17c8\u1923-\u1926\u1929-\u192b\u1930-\u1931\u1933-\u1938\u19b0-\u19c0\u19c8-\u19c9\u1a19-\u1a1a\u1a55\u1a57\u1a61\u1a63-\u1a64\u1a6d-\u1a72\u1b04\u1b35\u1b3b\u1b3d-\u1b41\u1b43-\u1b44\u1b82\u1ba1\u1ba6-\u1ba7\u1baa\u1bac-\u1bad\u1be7\u1bea-\u1bec\u1bee\u1bf2-\u1bf3\u1c24-\u1c2b\u1c34-\u1c35\u1ce1\u1cf2-\u1cf3\u302e-\u302f\ua823-\ua824\ua827\ua880-\ua881\ua8b4-\ua8c3\ua952-\ua953\ua983\ua9b4-\ua9b5\ua9ba-\ua9bb\ua9bd-\ua9c0\uaa2f-\uaa30\uaa33-\uaa34\uaa4d\uaa7b\uaaeb\uaaee-\uaaef\uaaf5\uabe3-\uabe4\uabe6-\uabe7\uabe9-\uabea\uabec'
Me = u'\u0488-\u0489\u20dd-\u20e0\u20e2-\u20e4\ua670-\ua672'
Mn = u'\u0300-\u036f\u0483-\u0487\u0591-\u05bd\u05bf\u05c1-\u05c2\u05c4-\u05c5\u05c7\u0610-\u061a\u064b-\u065f\u0670\u06d6-\u06dc\u06df-\u06e4\u06e7-\u06e8\u06ea-\u06ed\u0711\u0730-\u074a\u07a6-\u07b0\u07eb-\u07f3\u0816-\u0819\u081b-\u0823\u0825-\u0827\u0829-\u082d\u0859-\u085b\u08e4-\u08fe\u0900-\u0902\u093a\u093c\u0941-\u0948\u094d\u0951-\u0957\u0962-\u0963\u0981\u09bc\u09c1-\u09c4\u09cd\u09e2-\u09e3\u0a01-\u0a02\u0a3c\u0a41-\u0a42\u0a47-\u0a48\u0a4b-\u0a4d\u0a51\u0a70-\u0a71\u0a75\u0a81-\u0a82\u0abc\u0ac1-\u0ac5\u0ac7-\u0ac8\u0acd\u0ae2-\u0ae3\u0b01\u0b3c\u0b3f\u0b41-\u0b44\u0b4d\u0b56\u0b62-\u0b63\u0b82\u0bc0\u0bcd\u0c3e-\u0c40\u0c46-\u0c48\u0c4a-\u0c4d\u0c55-\u0c56\u0c62-\u0c63\u0cbc\u0cbf\u0cc6\u0ccc-\u0ccd\u0ce2-\u0ce3\u0d41-\u0d44\u0d4d\u0d62-\u0d63\u0dca\u0dd2-\u0dd4\u0dd6\u0e31\u0e34-\u0e3a\u0e47-\u0e4e\u0eb1\u0eb4-\u0eb9\u0ebb-\u0ebc\u0ec8-\u0ecd\u0f18-\u0f19\u0f35\u0f37\u0f39\u0f71-\u0f7e\u0f80-\u0f84\u0f86-\u0f87\u0f8d-\u0f97\u0f99-\u0fbc\u0fc6\u102d-\u1030\u1032-\u1037\u1039-\u103a\u103d-\u103e\u1058-\u1059\u105e-\u1060\u1071-\u1074\u1082\u1085-\u1086\u108d\u109d\u135d-\u135f\u1712-\u1714\u1732-\u1734\u1752-\u1753\u1772-\u1773\u17b4-\u17b5\u17b7-\u17bd\u17c6\u17c9-\u17d3\u17dd\u180b-\u180d\u18a9\u1920-\u1922\u1927-\u1928\u1932\u1939-\u193b\u1a17-\u1a18\u1a1b\u1a56\u1a58-\u1a5e\u1a60\u1a62\u1a65-\u1a6c\u1a73-\u1a7c\u1a7f\u1b00-\u1b03\u1b34\u1b36-\u1b3a\u1b3c\u1b42\u1b6b-\u1b73\u1b80-\u1b81\u1ba2-\u1ba5\u1ba8-\u1ba9\u1bab\u1be6\u1be8-\u1be9\u1bed\u1bef-\u1bf1\u1c2c-\u1c33\u1c36-\u1c37\u1cd0-\u1cd2\u1cd4-\u1ce0\u1ce2-\u1ce8\u1ced\u1cf4\u1dc0-\u1de6\u1dfc-\u1dff\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2cef-\u2cf1\u2d7f\u2de0-\u2dff\u302a-\u302d\u3099-\u309a\ua66f\ua674-\ua67d\ua69f\ua6f0-\ua6f1\ua802\ua806\ua80b\ua825-\ua826\ua8c4\ua8e0-\ua8f1\ua926-\ua92d\ua947-\ua951\ua980-\ua982\ua9b3\ua9b6-\ua9b9\ua9bc\uaa29-\uaa2e\uaa31-\uaa32\uaa35-\uaa36\uaa43\uaa4c\uaab0\uaab2-\uaab4\uaab7-\uaab8\uaabe-\uaabf\uaac1\uaaec-\uaaed\uaaf6\uabe5\uabe8\uabed\ufb1e\ufe00-\ufe0f\u
fe20-\ufe26'
Nd = u'0-9\u0660-\u0669\u06f0-\u06f9\u07c0-\u07c9\u0966-\u096f\u09e6-\u09ef\u0a66-\u0a6f\u0ae6-\u0aef\u0b66-\u0b6f\u0be6-\u0bef\u0c66-\u0c6f\u0ce6-\u0cef\u0d66-\u0d6f\u0e50-\u0e59\u0ed0-\u0ed9\u0f20-\u0f29\u1040-\u1049\u1090-\u1099\u17e0-\u17e9\u1810-\u1819\u1946-\u194f\u19d0-\u19d9\u1a80-\u1a89\u1a90-\u1a99\u1b50-\u1b59\u1bb0-\u1bb9\u1c40-\u1c49\u1c50-\u1c59\ua620-\ua629\ua8d0-\ua8d9\ua900-\ua909\ua9d0-\ua9d9\uaa50-\uaa59\uabf0-\uabf9\uff10-\uff19'
Nl = u'\u16ee-\u16f0\u2160-\u2182\u2185-\u2188\u3007\u3021-\u3029\u3038-\u303a\ua6e6-\ua6ef'
No = u'\xb2-\xb3\xb9\xbc-\xbe\u09f4-\u09f9\u0b72-\u0b77\u0bf0-\u0bf2\u0c78-\u0c7e\u0d70-\u0d75\u0f2a-\u0f33\u1369-\u137c\u17f0-\u17f9\u19da\u2070\u2074-\u2079\u2080-\u2089\u2150-\u215f\u2189\u2460-\u249b\u24ea-\u24ff\u2776-\u2793\u2cfd\u3192-\u3195\u3220-\u3229\u3248-\u324f\u3251-\u325f\u3280-\u3289\u32b1-\u32bf\ua830-\ua835'
Pc = u'_\u203f-\u2040\u2054\ufe33-\ufe34\ufe4d-\ufe4f\uff3f'
Pd = u'\\-\u058a\u05be\u1400\u1806\u2010-\u2015\u2e17\u2e1a\u2e3a-\u2e3b\u301c\u3030\u30a0\ufe31-\ufe32\ufe58\ufe63\uff0d'
Pe = u')\\]}\u0f3b\u0f3d\u169c\u2046\u207e\u208e\u2309\u230b\u232a\u2769\u276b\u276d\u276f\u2771\u2773\u2775\u27c6\u27e7\u27e9\u27eb\u27ed\u27ef\u2984\u2986\u2988\u298a\u298c\u298e\u2990\u2992\u2994\u2996\u2998\u29d9\u29db\u29fd\u2e23\u2e25\u2e27\u2e29\u3009\u300b\u300d\u300f\u3011\u3015\u3017\u3019\u301b\u301e-\u301f\ufd3f\ufe18\ufe36\ufe38\ufe3a\ufe3c\ufe3e\ufe40\ufe42\ufe44\ufe48\ufe5a\ufe5c\ufe5e\uff09\uff3d\uff5d\uff60\uff63'
# Unicode general category Pf ("Punctuation, final quote"), e.g. right guillemet.
Pf = u'\xbb\u2019\u201d\u203a\u2e03\u2e05\u2e0a\u2e0d\u2e1d\u2e21'
# Unicode general category Pi ("Punctuation, initial quote"), e.g. left guillemet.
Pi = u'\xab\u2018\u201b-\u201c\u201f\u2039\u2e02\u2e04\u2e09\u2e0c\u2e1c\u2e20'
Po = u"!-#%-'*,.-/:-;?-@\\\\\xa1\xa7\xb6-\xb7\xbf\u037e\u0387\u055a-\u055f\u0589\u05c0\u05c3\u05c6\u05f3-\u05f4\u0609-\u060a\u060c-\u060d\u061b\u061e-\u061f\u066a-\u066d\u06d4\u0700-\u070d\u07f7-\u07f9\u0830-\u083e\u085e\u0964-\u0965\u0970\u0af0\u0df4\u0e4f\u0e5a-\u0e5b\u0f04-\u0f12\u0f14\u0f85\u0fd0-\u0fd4\u0fd9-\u0fda\u104a-\u104f\u10fb\u1360-\u1368\u166d-\u166e\u16eb-\u16ed\u1735-\u1736\u17d4-\u17d6\u17d8-\u17da\u1800-\u1805\u1807-\u180a\u1944-\u1945\u1a1e-\u1a1f\u1aa0-\u1aa6\u1aa8-\u1aad\u1b5a-\u1b60\u1bfc-\u1bff\u1c3b-\u1c3f\u1c7e-\u1c7f\u1cc0-\u1cc7\u1cd3\u2016-\u2017\u2020-\u2027\u2030-\u2038\u203b-\u203e\u2041-\u2043\u2047-\u2051\u2053\u2055-\u205e\u2cf9-\u2cfc\u2cfe-\u2cff\u2d70\u2e00-\u2e01\u2e06-\u2e08\u2e0b\u2e0e-\u2e16\u2e18-\u2e19\u2e1b\u2e1e-\u2e1f\u2e2a-\u2e2e\u2e30-\u2e39\u3001-\u3003\u303d\u30fb\ua4fe-\ua4ff\ua60d-\ua60f\ua673\ua67e\ua6f2-\ua6f7\ua874-\ua877\ua8ce-\ua8cf\ua8f8-\ua8fa\ua92e-\ua92f\ua95f\ua9c1-\ua9cd\ua9de-\ua9df\uaa5c-\uaa5f\uaade-\uaadf\uaaf0-\uaaf1\uabeb\ufe10-\ufe16\ufe19\ufe30\ufe45-\ufe46\ufe49-\ufe4c\ufe50-\ufe52\ufe54-\ufe57\ufe5f-\ufe61\ufe68\ufe6a-\ufe6b\uff01-\uff03\uff05-\uff07\uff0a\uff0c\uff0e-\uff0f\uff1a-\uff1b\uff1f-\uff20\uff3c\uff61\uff64-\uff65"
Ps = u'(\\[{\u0f3a\u0f3c\u169b\u201a\u201e\u2045\u207d\u208d\u2308\u230a\u2329\u2768\u276a\u276c\u276e\u2770\u2772\u2774\u27c5\u27e6\u27e8\u27ea\u27ec\u27ee\u2983\u2985\u2987\u2989\u298b\u298d\u298f\u2991\u2993\u2995\u2997\u29d8\u29da\u29fc\u2e22\u2e24\u2e26\u2e28\u3008\u300a\u300c\u300e\u3010\u3014\u3016\u3018\u301a\u301d\ufd3e\ufe17\ufe35\ufe37\ufe39\ufe3b\ufe3d\ufe3f\ufe41\ufe43\ufe47\ufe59\ufe5b\ufe5d\uff08\uff3b\uff5b\uff5f\uff62'
Sc = u'$\xa2-\xa5\u058f\u060b\u09f2-\u09f3\u09fb\u0af1\u0bf9\u0e3f\u17db\u20a0-\u20ba\ua838\ufdfc\ufe69\uff04\uffe0-\uffe1\uffe5-\uffe6'
Sk = u'\\^`\xa8\xaf\xb4\xb8\u02c2-\u02c5\u02d2-\u02df\u02e5-\u02eb\u02ed\u02ef-\u02ff\u0375\u0384-\u0385\u1fbd\u1fbf-\u1fc1\u1fcd-\u1fcf\u1fdd-\u1fdf\u1fed-\u1fef\u1ffd-\u1ffe\u309b-\u309c\ua700-\ua716\ua720-\ua721\ua789-\ua78a\ufbb2-\ufbc1\uff3e\uff40\uffe3'
Sm = u'+<->|~\xac\xb1\xd7\xf7\u03f6\u0606-\u0608\u2044\u2052\u207a-\u207c\u208a-\u208c\u2118\u2140-\u2144\u214b\u2190-\u2194\u219a-\u219b\u21a0\u21a3\u21a6\u21ae\u21ce-\u21cf\u21d2\u21d4\u21f4-\u22ff\u2320-\u2321\u237c\u239b-\u23b3\u23dc-\u23e1\u25b7\u25c1\u25f8-\u25ff\u266f\u27c0-\u27c4\u27c7-\u27e5\u27f0-\u27ff\u2900-\u2982\u2999-\u29d7\u29dc-\u29fb\u29fe-\u2aff\u2b30-\u2b44\u2b47-\u2b4c\ufb29\ufe62\ufe64-\ufe66\uff0b\uff1c-\uff1e\uff5c\uff5e\uffe2\uffe9-\uffec'
So = u'\xa6\xa9\xae\xb0\u0482\u060e-\u060f\u06de\u06e9\u06fd-\u06fe\u07f6\u09fa\u0b70\u0bf3-\u0bf8\u0bfa\u0c7f\u0d79\u0f01-\u0f03\u0f13\u0f15-\u0f17\u0f1a-\u0f1f\u0f34\u0f36\u0f38\u0fbe-\u0fc5\u0fc7-\u0fcc\u0fce-\u0fcf\u0fd5-\u0fd8\u109e-\u109f\u1390-\u1399\u1940\u19de-\u19ff\u1b61-\u1b6a\u1b74-\u1b7c\u2100-\u2101\u2103-\u2106\u2108-\u2109\u2114\u2116-\u2117\u211e-\u2123\u2125\u2127\u2129\u212e\u213a-\u213b\u214a\u214c-\u214d\u214f\u2195-\u2199\u219c-\u219f\u21a1-\u21a2\u21a4-\u21a5\u21a7-\u21ad\u21af-\u21cd\u21d0-\u21d1\u21d3\u21d5-\u21f3\u2300-\u2307\u230c-\u231f\u2322-\u2328\u232b-\u237b\u237d-\u239a\u23b4-\u23db\u23e2-\u23f3\u2400-\u2426\u2440-\u244a\u249c-\u24e9\u2500-\u25b6\u25b8-\u25c0\u25c2-\u25f7\u2600-\u266e\u2670-\u26ff\u2701-\u2767\u2794-\u27bf\u2800-\u28ff\u2b00-\u2b2f\u2b45-\u2b46\u2b50-\u2b59\u2ce5-\u2cea\u2e80-\u2e99\u2e9b-\u2ef3\u2f00-\u2fd5\u2ff0-\u2ffb\u3004\u3012-\u3013\u3020\u3036-\u3037\u303e-\u303f\u3190-\u3191\u3196-\u319f\u31c0-\u31e3\u3200-\u321e\u322a-\u3247\u3250\u3260-\u327f\u328a-\u32b0\u32c0-\u32fe\u3300-\u33ff\u4dc0-\u4dff\ua490-\ua4c6\ua828-\ua82b\ua836-\ua837\ua839\uaa77-\uaa79\ufdfd\uffe4\uffe8\uffed-\uffee\ufffc-\ufffd'
# Unicode general category Zl ("Separator, line"): U+2028 LINE SEPARATOR only.
Zl = u'\u2028'
# Unicode general category Zp ("Separator, paragraph"): U+2029 only.
Zp = u'\u2029'
# Unicode general category Zs ("Separator, space"): space characters,
# including NBSP and the fixed-width typographic spaces.
Zs = u' \xa0\u1680\u2000-\u200a\u202f\u205f\u3000'
xid_continue = u'0-9A-Z_a-z\xaa\xb5\xb7\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0300-\u0374\u0376-\u0377\u037b-\u037d\u0386-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u0483-\u0487\u048a-\u0527\u0531-\u0556\u0559\u0561-\u0587\u0591-\u05bd\u05bf\u05c1-\u05c2\u05c4-\u05c5\u05c7\u05d0-\u05ea\u05f0-\u05f2\u0610-\u061a\u0620-\u0669\u066e-\u06d3\u06d5-\u06dc\u06df-\u06e8\u06ea-\u06fc\u06ff\u0710-\u074a\u074d-\u07b1\u07c0-\u07f5\u07fa\u0800-\u082d\u0840-\u085b\u08a0\u08a2-\u08ac\u08e4-\u08fe\u0900-\u0963\u0966-\u096f\u0971-\u0977\u0979-\u097f\u0981-\u0983\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bc-\u09c4\u09c7-\u09c8\u09cb-\u09ce\u09d7\u09dc-\u09dd\u09df-\u09e3\u09e6-\u09f1\u0a01-\u0a03\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a3c\u0a3e-\u0a42\u0a47-\u0a48\u0a4b-\u0a4d\u0a51\u0a59-\u0a5c\u0a5e\u0a66-\u0a75\u0a81-\u0a83\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abc-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ad0\u0ae0-\u0ae3\u0ae6-\u0aef\u0b01-\u0b03\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3c-\u0b44\u0b47-\u0b48\u0b4b-\u0b4d\u0b56-\u0b57\u0b5c-\u0b5d\u0b5f-\u0b63\u0b66-\u0b6f\u0b71\u0b82-\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd0\u0bd7\u0be6-\u0bef\u0c01-\u0c03\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c33\u0c35-\u0c39\u0c3d-\u0c44\u0c46-\u0c48\u0c4a-\u0c4d\u0c55-\u0c56\u0c58-\u0c59\u0c60-\u0c63\u0c66-\u0c6f\u0c82-\u0c83\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbc-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5-\u0cd6\u0cde\u0ce0-\u0ce3\u0ce6-\u0cef\u0cf1-\u0cf2\u0d02-\u0d03\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d-\u0d44\u0d46-\u0d48\u0d4a-\u0d4e\u0d57\u0d60-\u0d63\u0d66-\u0d6f\u0d7a-\u0d7f\u0d82-\u0d83\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\
u0dbb\u0dbd\u0dc0-\u0dc6\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0df2-\u0df3\u0e01-\u0e3a\u0e40-\u0e4e\u0e50-\u0e59\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb9\u0ebb-\u0ebd\u0ec0-\u0ec4\u0ec6\u0ec8-\u0ecd\u0ed0-\u0ed9\u0edc-\u0edf\u0f00\u0f18-\u0f19\u0f20-\u0f29\u0f35\u0f37\u0f39\u0f3e-\u0f47\u0f49-\u0f6c\u0f71-\u0f84\u0f86-\u0f97\u0f99-\u0fbc\u0fc6\u1000-\u1049\u1050-\u109d\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u135d-\u135f\u1369-\u1371\u1380-\u138f\u13a0-\u13f4\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f0\u1700-\u170c\u170e-\u1714\u1720-\u1734\u1740-\u1753\u1760-\u176c\u176e-\u1770\u1772-\u1773\u1780-\u17d3\u17d7\u17dc-\u17dd\u17e0-\u17e9\u180b-\u180d\u1810-\u1819\u1820-\u1877\u1880-\u18aa\u18b0-\u18f5\u1900-\u191c\u1920-\u192b\u1930-\u193b\u1946-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u19d0-\u19da\u1a00-\u1a1b\u1a20-\u1a5e\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1aa7\u1b00-\u1b4b\u1b50-\u1b59\u1b6b-\u1b73\u1b80-\u1bf3\u1c00-\u1c37\u1c40-\u1c49\u1c4d-\u1c7d\u1cd0-\u1cd2\u1cd4-\u1cf6\u1d00-\u1de6\u1dfc-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u203f-\u2040\u2054\u2071\u207f\u2090-\u209c\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2102\u2107\u210a-\u2113\u2115\u2118-\u211d\u2124\u2126\u2128\u212a-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d7f-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2de0-\u2dff\u3005-\u3007\u3021-\u302f\u3031-\u3035\u3038-\u303
c\u3041-\u3096\u3099-\u309a\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312d\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fcc\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua62b\ua640-\ua66f\ua674-\ua67d\ua67f-\ua697\ua69f-\ua6f1\ua717-\ua71f\ua722-\ua788\ua78b-\ua78e\ua790-\ua793\ua7a0-\ua7aa\ua7f8-\ua827\ua840-\ua873\ua880-\ua8c4\ua8d0-\ua8d9\ua8e0-\ua8f7\ua8fb\ua900-\ua92d\ua930-\ua953\ua960-\ua97c\ua980-\ua9c0\ua9cf-\ua9d9\uaa00-\uaa36\uaa40-\uaa4d\uaa50-\uaa59\uaa60-\uaa76\uaa7a-\uaa7b\uaa80-\uaac2\uaadb-\uaadd\uaae0-\uaaef\uaaf2-\uaaf6\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabea\uabec-\uabed\uabf0-\uabf9\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufc5d\ufc64-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdf9\ufe00-\ufe0f\ufe20-\ufe26\ufe33-\ufe34\ufe4d-\ufe4f\ufe71\ufe73\ufe77\ufe79\ufe7b\ufe7d\ufe7f-\ufefc\uff10-\uff19\uff21-\uff3a\uff3f\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc'
xid_start = u'A-Z_a-z\xaa\xb5\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376-\u0377\u037b-\u037d\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u0527\u0531-\u0556\u0559\u0561-\u0587\u05d0-\u05ea\u05f0-\u05f2\u0620-\u064a\u066e-\u066f\u0671-\u06d3\u06d5\u06e5-\u06e6\u06ee-\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u07f4-\u07f5\u07fa\u0800-\u0815\u081a\u0824\u0828\u0840-\u0858\u08a0\u08a2-\u08ac\u0904-\u0939\u093d\u0950\u0958-\u0961\u0971-\u0977\u0979-\u097f\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc-\u09dd\u09df-\u09e1\u09f0-\u09f1\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0-\u0ae1\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3d\u0b5c-\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c33\u0c35-\u0c39\u0c3d\u0c58-\u0c59\u0c60-\u0c61\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0-\u0ce1\u0cf1-\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d60-\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32\u0e40-\u0e46\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb0\u0eb2\u0ebd\u0ec0-\u0ec4\u0ec6\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070\u1075-\u1081\u108e\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5
\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f4\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f0\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u1820-\u1877\u1880-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191c\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19c1-\u19c7\u1a00-\u1a16\u1a20-\u1a54\u1aa7\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae-\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c7d\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5-\u1cf6\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u2071\u207f\u2090-\u209c\u2102\u2107\u210a-\u2113\u2115\u2118-\u211d\u2124\u2126\u2128\u212a-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cee\u2cf2-\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303c\u3041-\u3096\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312d\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fcc\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua61f\ua62a-\ua62b\ua640-\ua66e\ua67f-\ua697\ua6a0-\ua6ef\ua717-\ua71f\ua722-\ua788\ua78b-\ua78e\ua790-\ua793\ua7a0-\ua7aa\ua7f8-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9cf\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa76\uaa7a\uaa80-\uaaaf\uaab1\uaab5-\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadd\uaae0-\uaaea\uaaf2-\uaaf4\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb3
8-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufc5d\ufc64-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdf9\ufe71\ufe73\ufe77\ufe79\ufe7b\ufe7d\ufe7f-\ufefc\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d\uffa0-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc'
if sys.maxunicode > 0xFFFF:
# non-BMP characters, use only on wide Unicode builds
Cf += u'\U000110bd\U0001d173-\U0001d17a\U000e0001\U000e0020-\U000e007f'
Cn += u'\U0001000c\U00010027\U0001003b\U0001003e\U0001004e-\U0001004f\U0001005e-\U0001007f\U000100fb-\U000100ff\U00010103-\U00010106\U00010134-\U00010136\U0001018b-\U0001018f\U0001019c-\U000101cf\U000101fe-\U0001027f\U0001029d-\U0001029f\U000102d1-\U000102ff\U0001031f\U00010324-\U0001032f\U0001034b-\U0001037f\U0001039e\U000103c4-\U000103c7\U000103d6-\U000103ff\U0001049e-\U0001049f\U000104aa-\U000107ff\U00010806-\U00010807\U00010809\U00010836\U00010839-\U0001083b\U0001083d-\U0001083e\U00010856\U00010860-\U000108ff\U0001091c-\U0001091e\U0001093a-\U0001093e\U00010940-\U0001097f\U000109b8-\U000109bd\U000109c0-\U000109ff\U00010a04\U00010a07-\U00010a0b\U00010a14\U00010a18\U00010a34-\U00010a37\U00010a3b-\U00010a3e\U00010a48-\U00010a4f\U00010a59-\U00010a5f\U00010a80-\U00010aff\U00010b36-\U00010b38\U00010b56-\U00010b57\U00010b73-\U00010b77\U00010b80-\U00010bff\U00010c49-\U00010e5f\U00010e7f-\U00010fff\U0001104e-\U00011051\U00011070-\U0001107f\U000110c2-\U000110cf\U000110e9-\U000110ef\U000110fa-\U000110ff\U00011135\U00011144-\U0001117f\U000111c9-\U000111cf\U000111da-\U0001167f\U000116b8-\U000116bf\U000116ca-\U00011fff\U0001236f-\U000123ff\U00012463-\U0001246f\U00012474-\U00012fff\U0001342f-\U000167ff\U00016a39-\U00016eff\U00016f45-\U00016f4f\U00016f7f-\U00016f8e\U00016fa0-\U0001afff\U0001b002-\U0001cfff\U0001d0f6-\U0001d0ff\U0001d127-\U0001d128\U0001d1de-\U0001d1ff\U0001d246-\U0001d2ff\U0001d357-\U0001d35f\U0001d372-\U0001d3ff\U0001d455\U0001d49d\U0001d4a0-\U0001d4a1\U0001d4a3-\U0001d4a4\U0001d4a7-\U0001d4a8\U0001d4ad\U0001d4ba\U0001d4bc\U0001d4c4\U0001d506\U0001d50b-\U0001d50c\U0001d515\U0001d51d\U0001d53a\U0001d53f\U0001d545\U0001d547-\U0001d549\U0001d551\U0001d6a6-\U0001d6a7\U0001d7cc-\U0001d7cd\U0001d800-\U0001edff\U0001ee04\U0001ee20\U0001ee23\U0001ee25-\U0001ee26\U0001ee28\U0001ee33\U0001ee38\U0001ee3a\U0001ee3c-\U0001ee41\U0001ee43-\U0001ee46\U0001ee48\U0001ee4a\U0001ee4c\U0001ee50\U0001ee53\U0001ee55-\U0001ee56\U0001ee58\U0001ee5a\U0001ee5c\U0001ee5e\U0001ee60\U0001ee
63\U0001ee65-\U0001ee66\U0001ee6b\U0001ee73\U0001ee78\U0001ee7d\U0001ee7f\U0001ee8a\U0001ee9c-\U0001eea0\U0001eea4\U0001eeaa\U0001eebc-\U0001eeef\U0001eef2-\U0001efff\U0001f02c-\U0001f02f\U0001f094-\U0001f09f\U0001f0af-\U0001f0b0\U0001f0bf-\U0001f0c0\U0001f0d0\U0001f0e0-\U0001f0ff\U0001f10b-\U0001f10f\U0001f12f\U0001f16c-\U0001f16f\U0001f19b-\U0001f1e5\U0001f203-\U0001f20f\U0001f23b-\U0001f23f\U0001f249-\U0001f24f\U0001f252-\U0001f2ff\U0001f321-\U0001f32f\U0001f336\U0001f37d-\U0001f37f\U0001f394-\U0001f39f\U0001f3c5\U0001f3cb-\U0001f3df\U0001f3f1-\U0001f3ff\U0001f43f\U0001f441\U0001f4f8\U0001f4fd-\U0001f4ff\U0001f53e-\U0001f53f\U0001f544-\U0001f54f\U0001f568-\U0001f5fa\U0001f641-\U0001f644\U0001f650-\U0001f67f\U0001f6c6-\U0001f6ff\U0001f774-\U0001ffff\U0002a6d7-\U0002a6ff\U0002b735-\U0002b73f\U0002b81e-\U0002f7ff\U0002fa1e-\U000e0000\U000e0002-\U000e001f\U000e0080-\U000e00ff\U000e01f0-\U000effff\U000ffffe-\U000fffff\U0010fffe-\U0010ffff'
Co += u'\U000f0000-\U000ffffd\U00100000-\U0010fffd'
Ll += u'\U00010428-\U0001044f\U0001d41a-\U0001d433\U0001d44e-\U0001d454\U0001d456-\U0001d467\U0001d482-\U0001d49b\U0001d4b6-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d4cf\U0001d4ea-\U0001d503\U0001d51e-\U0001d537\U0001d552-\U0001d56b\U0001d586-\U0001d59f\U0001d5ba-\U0001d5d3\U0001d5ee-\U0001d607\U0001d622-\U0001d63b\U0001d656-\U0001d66f\U0001d68a-\U0001d6a5\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6e1\U0001d6fc-\U0001d714\U0001d716-\U0001d71b\U0001d736-\U0001d74e\U0001d750-\U0001d755\U0001d770-\U0001d788\U0001d78a-\U0001d78f\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7c9\U0001d7cb'
Lm += u'\U00016f93-\U00016f9f'
Lo += u'\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010280-\U0001029c\U000102a0-\U000102d0\U00010300-\U0001031e\U00010330-\U00010340\U00010342-\U00010349\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U00010450-\U0001049d\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00\U00010a10-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a33\U00010a60-\U00010a7c\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010c00-\U00010c48\U00011003-\U00011037\U00011083-\U000110af\U000110d0-\U000110e8\U00011103-\U00011126\U00011183-\U000111b2\U000111c1-\U000111c4\U00011680-\U000116aa\U00012000-\U0001236e\U00013000-\U0001342e\U00016800-\U00016a38\U00016f00-\U00016f44\U00016f50\U0001b000-\U0001b001\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002f800-\U0002fa1d'
Lu += u'\U00010400-\U00010427\U0001d400-\U0001d419\U0001d434-\U0001d44d\U0001d468-\U0001d481\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b5\U0001d4d0-\U0001d4e9\U0001d504-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d538-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d56c-\U0001d585\U0001d5a0-\U0001d5b9\U0001d5d4-\U0001d5ed\U0001d608-\U0001d621\U0001d63c-\U0001d655\U0001d670-\U0001d689\U0001d6a8-\U0001d6c0\U0001d6e2-\U0001d6fa\U0001d71c-\U0001d734\U0001d756-\U0001d76e\U0001d790-\U0001d7a8\U0001d7ca'
Mc += u'\U00011000\U00011002\U00011082\U000110b0-\U000110b2\U000110b7-\U000110b8\U0001112c\U00011182\U000111b3-\U000111b5\U000111bf-\U000111c0\U000116ac\U000116ae-\U000116af\U000116b6\U00016f51-\U00016f7e\U0001d165-\U0001d166\U0001d16d-\U0001d172'
Mn += u'\U000101fd\U00010a01-\U00010a03\U00010a05-\U00010a06\U00010a0c-\U00010a0f\U00010a38-\U00010a3a\U00010a3f\U00011001\U00011038-\U00011046\U00011080-\U00011081\U000110b3-\U000110b6\U000110b9-\U000110ba\U00011100-\U00011102\U00011127-\U0001112b\U0001112d-\U00011134\U00011180-\U00011181\U000111b6-\U000111be\U000116ab\U000116ad\U000116b0-\U000116b5\U000116b7\U00016f8f-\U00016f92\U0001d167-\U0001d169\U0001d17b-\U0001d182\U0001d185-\U0001d18b\U0001d1aa-\U0001d1ad\U0001d242-\U0001d244\U000e0100-\U000e01ef'
Nd += u'\U000104a0-\U000104a9\U00011066-\U0001106f\U000110f0-\U000110f9\U00011136-\U0001113f\U000111d0-\U000111d9\U000116c0-\U000116c9\U0001d7ce-\U0001d7ff'
Nl += u'\U00010140-\U00010174\U00010341\U0001034a\U000103d1-\U000103d5\U00012400-\U00012462'
No += u'\U00010107-\U00010133\U00010175-\U00010178\U0001018a\U00010320-\U00010323\U00010858-\U0001085f\U00010916-\U0001091b\U00010a40-\U00010a47\U00010a7d-\U00010a7e\U00010b58-\U00010b5f\U00010b78-\U00010b7f\U00010e60-\U00010e7e\U00011052-\U00011065\U0001d360-\U0001d371\U0001f100-\U0001f10a'
Po += u'\U00010100-\U00010102\U0001039f\U000103d0\U00010857\U0001091f\U0001093f\U00010a50-\U00010a58\U00010a7f\U00010b39-\U00010b3f\U00011047-\U0001104d\U000110bb-\U000110bc\U000110be-\U000110c1\U00011140-\U00011143\U000111c5-\U000111c8\U00012470-\U00012473'
Sm += u'\U0001d6c1\U0001d6db\U0001d6fb\U0001d715\U0001d735\U0001d74f\U0001d76f\U0001d789\U0001d7a9\U0001d7c3\U0001eef0-\U0001eef1'
So += u'\U00010137-\U0001013f\U00010179-\U00010189\U00010190-\U0001019b\U000101d0-\U000101fc\U0001d000-\U0001d0f5\U0001d100-\U0001d126\U0001d129-\U0001d164\U0001d16a-\U0001d16c\U0001d183-\U0001d184\U0001d18c-\U0001d1a9\U0001d1ae-\U0001d1dd\U0001d200-\U0001d241\U0001d245\U0001d300-\U0001d356\U0001f000-\U0001f02b\U0001f030-\U0001f093\U0001f0a0-\U0001f0ae\U0001f0b1-\U0001f0be\U0001f0c1-\U0001f0cf\U0001f0d1-\U0001f0df\U0001f110-\U0001f12e\U0001f130-\U0001f16b\U0001f170-\U0001f19a\U0001f1e6-\U0001f202\U0001f210-\U0001f23a\U0001f240-\U0001f248\U0001f250-\U0001f251\U0001f300-\U0001f320\U0001f330-\U0001f335\U0001f337-\U0001f37c\U0001f380-\U0001f393\U0001f3a0-\U0001f3c4\U0001f3c6-\U0001f3ca\U0001f3e0-\U0001f3f0\U0001f400-\U0001f43e\U0001f440\U0001f442-\U0001f4f7\U0001f4f9-\U0001f4fc\U0001f500-\U0001f53d\U0001f540-\U0001f543\U0001f550-\U0001f567\U0001f5fb-\U0001f640\U0001f645-\U0001f64f\U0001f680-\U0001f6c5\U0001f700-\U0001f773'
xid_continue += u'\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010140-\U00010174\U000101fd\U00010280-\U0001029c\U000102a0-\U000102d0\U00010300-\U0001031e\U00010330-\U0001034a\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U000103d1-\U000103d5\U00010400-\U0001049d\U000104a0-\U000104a9\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00-\U00010a03\U00010a05-\U00010a06\U00010a0c-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a33\U00010a38-\U00010a3a\U00010a3f\U00010a60-\U00010a7c\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010c00-\U00010c48\U00011000-\U00011046\U00011066-\U0001106f\U00011080-\U000110ba\U000110d0-\U000110e8\U000110f0-\U000110f9\U00011100-\U00011134\U00011136-\U0001113f\U00011180-\U000111c4\U000111d0-\U000111d9\U00011680-\U000116b7\U000116c0-\U000116c9\U00012000-\U0001236e\U00012400-\U00012462\U00013000-\U0001342e\U00016800-\U00016a38\U00016f00-\U00016f44\U00016f50-\U00016f7e\U00016f8f-\U00016f9f\U0001b000-\U0001b001\U0001d165-\U0001d169\U0001d16d-\U0001d172\U0001d17b-\U0001d182\U0001d185-\U0001d18b\U0001d1aa-\U0001d1ad\U0001d242-\U0001d244\U0001d400-\U0001d454\U0001d456-\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d51e-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d552-\U0001d6a5\U0001d6a8-\U0001d6c0\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6fa\U0001d6fc-\U0001d714\U0001d716-\U0001d734\U0001d736-\U0001d74e\U0001d750-\U0001d76e\U0001d770-\U0001d788\U0001d78a-\U0001d7a8\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7cb\U0001d7ce-\U0001d7ff\U0001ee00-\U0001ee03\
U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002f800-\U0002fa1d\U000e0100-\U000e01ef'
xid_start += u'\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010140-\U00010174\U00010280-\U0001029c\U000102a0-\U000102d0\U00010300-\U0001031e\U00010330-\U0001034a\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U000103d1-\U000103d5\U00010400-\U0001049d\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00\U00010a10-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a33\U00010a60-\U00010a7c\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010c00-\U00010c48\U00011003-\U00011037\U00011083-\U000110af\U000110d0-\U000110e8\U00011103-\U00011126\U00011183-\U000111b2\U000111c1-\U000111c4\U00011680-\U000116aa\U00012000-\U0001236e\U00012400-\U00012462\U00013000-\U0001342e\U00016800-\U00016a38\U00016f00-\U00016f44\U00016f50\U00016f93-\U00016f9f\U0001b000-\U0001b001\U0001d400-\U0001d454\U0001d456-\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d51e-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d552-\U0001d6a5\U0001d6a8-\U0001d6c0\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6fa\U0001d6fc-\U0001d714\U0001d716-\U0001d734\U0001d736-\U0001d74e\U0001d750-\U0001d76e\U0001d770-\U0001d788\U0001d78a-\U0001d7a8\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7cb\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\
U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002f800-\U0002fa1d'
# All Unicode general-category names whose range strings are defined above.
# The xid_start/xid_continue tables are identifier-matching helpers, not
# general categories, so they are deliberately excluded (see the
# regeneration code below, which removes them before writing this list).
cats = ['Cc', 'Cf', 'Cn', 'Co', 'Cs', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu', 'Mc', 'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps', 'Sc', 'Sk', 'Sm', 'So', 'Zl', 'Zp', 'Zs']
# Generated from unidata 6.3.0
def combine(*args):
    """Return the concatenated range strings of the named categories."""
    parts = [globals()[name] for name in args]
    return u''.join(parts)
def allexcept(*args):
    """Return the range strings of every category except the ones named.

    Raises ValueError (via list.remove) if an argument is not a known
    category, exactly like the original remove-based implementation.
    """
    remaining = list(cats)
    for excluded in args:
        remaining.remove(excluded)
    return u''.join(globals()[name] for name in remaining)
def _handle_runs(char_list): # pragma: no cover
buf = []
for c in char_list:
if len(c) == 1:
if buf and buf[-1][1] == chr(ord(c)-1):
buf[-1] = (buf[-1][0], c)
else:
buf.append((c, c))
else:
buf.append((c, c))
for a, b in buf:
if a == b:
yield a
else:
yield u'%s-%s' % (a, b)
if __name__ == '__main__':  # pragma: no cover
    # Self-regeneration entry point: rescan unicodedata and rewrite this
    # very file in place, preserving the hand-written header (everything
    # before "Cc =") and footer (everything from "def combine(" onwards).
    import unicodedata
    # we need Py3 for the determination of the XID_* properties
    if sys.version_info[:2] < (3, 3):
        raise RuntimeError('this file must be regenerated with Python 3.3+')
    # Characters are bucketed separately for the Basic Multilingual Plane
    # and the supplementary planes, so non-BMP data can be emitted behind
    # the sys.maxunicode guard for narrow builds.
    categories_bmp = {'xid_start': [], 'xid_continue': []}
    categories_nonbmp = {'xid_start': [], 'xid_continue': []}
    with open(__file__) as fp:
        content = fp.read()
    header = content[:content.find('Cc =')]
    footer = content[content.find("def combine("):]
    for code in range(0x110000):
        c = chr(code)
        cat = unicodedata.category(c)
        if ord(c) == 0xdc00:
            # Hack to avoid combining this combining character with the
            # preceding high surrogate, 0xdbff, when doing a repr.
            c = u'\\' + c
        elif ord(c) in (0x2d, 0x5b, 0x5c, 0x5d, 0x5e):
            # Escape regex metachars.
            c = u'\\' + c
        cat_dic = categories_bmp if code < 0x10000 else categories_nonbmp
        cat_dic.setdefault(cat, []).append(c)
        # XID_START and XID_CONTINUE are special categories used for matching
        # identifiers in Python 3.
        if c.isidentifier():
            cat_dic['xid_start'].append(c)
        if ('a' + c).isidentifier():
            cat_dic['xid_continue'].append(c)
    with open(__file__, 'w') as fp:
        fp.write(header)
        for cat in sorted(categories_bmp):
            val = u''.join(_handle_runs(categories_bmp[cat]))
            if cat == 'Cs':
                # Jython can't handle isolated surrogates
                fp.write("""\
try:
    Cs = eval(r"u%s")
except UnicodeDecodeError:
    Cs = '' # Jython can't handle isolated surrogates\n\n""" % ascii(val))
            else:
                fp.write('%s = u%a\n\n' % (cat, val))
        fp.write('if sys.maxunicode > 0xFFFF:\n')
        fp.write('    # non-BMP characters, use only on wide Unicode builds\n')
        for cat in sorted(categories_nonbmp):
            # no special case for Cs needed, since there are no surrogates
            # in the higher planes
            val = u''.join(_handle_runs(categories_nonbmp[cat]))
            fp.write('    %s += u%a\n\n' % (cat, val))
        # Rebuild the module-level ``cats`` list without the xid_* helper
        # tables, which are not Unicode general categories.
        cats = sorted(categories_bmp)
        cats.remove('xid_start')
        cats.remove('xid_continue')
        fp.write('cats = %r\n\n' % cats)
        fp.write('# Generated from unidata %s\n\n' % (unicodedata.unidata_version,))
        fp.write(footer)
| mit |
KSanthanam/rethinkdb | external/v8_3.30.33.16/build/gyp/test/same-target-name-different-directory/gyptest-all.py | 242 | 1179 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test cases when multiple targets in different directories have the same name.
"""
import TestGyp
test = TestGyp.TestGyp(formats=['android', 'ninja', 'make'])
test.run_gyp('subdirs.gyp', chdir='src')
test.relocate('src', 'relocate/src')

# Each entry pairs a target name with the file each subdirectory's build
# is expected to produce.  Building every entry verifies that same-named
# targets, actions and rules living in different directories are all
# built with the correct definitions.
checks = [
    ('target', ('action1.txt', 'action2.txt')),
    ('target_same_action_name', ('action.txt', 'action.txt')),
    ('target_same_rule_name', ('rule.txt', 'rule.txt')),
]
for target_name, (subdir1_file, subdir2_file) in checks:
    test.build('subdirs.gyp', target_name, chdir='relocate/src')
    test.must_exist('relocate/src/subdir1/' + subdir1_file)
    test.must_exist('relocate/src/subdir2/' + subdir2_file)

test.pass_test()
| agpl-3.0 |
rajatchopra/origin | vendor/github.com/google/certificate-transparency/python/ct/crypto/pem.py | 23 | 14910 | """Read and write PEM files and strings."""
import base64
import StringIO
from ct.crypto import error
class PemError(error.EncodingError):
    """Raised when PEM content cannot be parsed or decoded."""
    pass
_START_TEMPLATE = "-----BEGIN %s-----"
_END_TEMPLATE = "-----END %s-----"
class PemReader(object):
    """A reader class for iteratively reading PEM files."""
    def __init__(self, fileobj, markers, skip_invalid_blobs=True):
        """Create a PemReader from a file object.
        When used as a context manager, the file object is closed
        upon exit.
        Args:
            fileobj: the file object to read from.
            markers: a single marker string, or an iterable of markers
                accepted by the reader, e.g., CERTIFICATE, RSA PUBLIC KEY,
                etc.
            skip_invalid_blobs: if False, invalid PEM blobs cause a PemError.
                If True, invalid blobs are skipped. In non-skip mode, an
                immediate StopIteration before any valid blocks are found, also
                causes a PemError exception.
        Raises:
            PemError: invalid PEM contents.
        """
        self.__f = fileobj
        # The module-level helpers (from_pem() etc.) document that a single
        # marker string is acceptable. Wrap a bare string so it is not
        # iterated character by character, which would silently register
        # bogus one-letter markers.
        # NOTE(review): on Python 2 a unicode marker string is not wrapped
        # here; callers should pass byte strings or an iterable.
        if isinstance(markers, str):
            markers = [markers]
        # Maps a full PEM header line to the marker it announces.
        self.__marker_dict = ({(_START_TEMPLATE % m): m for m in markers})
        self.__valid_blobs_read = 0
        self.__eof = False
        self.__skip_invalid_blobs = skip_invalid_blobs
    def __iter__(self):
        """Iterate over file contents.
        Returns:
            a generator function that yields decoded (blob, marker)
            tuples.
        """
        return self.read_blocks()
    def close(self):
        """Close the underlying file object."""
        self.__f.close()
    def __enter__(self):
        return self
    def __exit__(self, unused_type, unused_value, traceback):
        self.close()
    @classmethod
    def from_file(cls, pem_file, markers, skip_invalid_blobs=True):
        """Create a PemReader for reading a file.
        Caller is responsible for closing the reader afterwards.
        Args:
            pem_file: the file to read from.
            markers: a single marker string, or an iterable of markers
                accepted by the reader, e.g., CERTIFICATE, RSA PUBLIC KEY,
                etc.
            skip_invalid_blobs: if False, invalid PEM blobs cause a PemError.
                If True, invalid blobs are skipped. In non-skip mode, an
                immediate StopIteration before any valid blocks are found, also
                causes a PemError exception.
        Returns:
            a PemReader object.
        Raises:
            IOError, ValueError: the fileobject could not be operated on.
        """
        return cls(open(pem_file, "r"), markers, skip_invalid_blobs)
    @classmethod
    def from_string(cls, pem_string, markers, skip_invalid_blobs=True):
        """Create a PemReader for reading a string.
        Args:
            pem_string: the string to read from.
            markers: a single marker string, or an iterable of markers
                accepted by the reader, e.g., CERTIFICATE, RSA PUBLIC KEY,
                etc.
            skip_invalid_blobs: if False, invalid PEM blobs cause a PemError.
                If True, invalid blobs are skipped. In non-skip mode, an
                immediate StopIteration before any valid blocks are found, also
                causes a PemError exception.
        Returns:
            a PemReader object.
        """
        f = StringIO.StringIO(pem_string)
        return cls(f, markers, skip_invalid_blobs)
    def read_blocks(self):
        """Read the next PEM blob.
        Yields:
            (raw_string, marker) tuples containing the decoded blob and the
            marker used to detect the blob.
        Raises:
            PemError: a PEM block was invalid (in skip_invalid_blobs mode).
            IOError, ValueError: the file object could not be operated on.
            StopIteration: EOF was reached.
        """
        while not self.__eof:
            marker = None
            for line in self.__f:
                line = line.rstrip("\r\n")
                # PEM (RFC 1421) allows arbitrary comments between PEM blocks
                # so we skip over those
                if line in self.__marker_dict:
                    marker = self.__marker_dict[line]
                    break
            if not marker:
                self.__eof = True
                if (not self.__skip_invalid_blobs and
                    not self.__valid_blobs_read):
                    raise PemError("No PEM header")
                # End the generator with a plain return: explicitly raising
                # StopIteration inside a generator is a RuntimeError on
                # Python 3.7+ (PEP 479) and was never required.
                return
            ret = ""
            footer = _END_TEMPLATE % marker
            footer_found = False
            for line in self.__f:
                line = line.rstrip("\r\n")
                if line == footer:
                    footer_found = True
                    break
                ret += line
            # Here, we assume that each header is exactly matched by a footer.
            # TODO(ekasper): determine if this assumption is overly strict,
            # i.e., whether blocks such as BEGIN RSA PUBLIC KEY...END PUBLIC KEY
            # are commonly used in applications.
            if not footer_found:
                self.__eof = True
                if not self.__skip_invalid_blobs:
                    raise PemError("No PEM footer line to match the header")
                return
            try:
                # We don't use ret.decode('base64') here as the exceptions from
                # this method are not properly documented.
                # NOTE(review): on Python 3, b64decode signals bad input with
                # binascii.Error (a ValueError), not TypeError - confirm before
                # relying on skip mode there.
                yield base64.b64decode(ret), marker
                self.__valid_blobs_read += 1
            except TypeError:
                if not self.__skip_invalid_blobs:
                    # We do not set EOF here so caller can resume - even though
                    # this can normally be transparently handled by setting
                    # skip_invalid_blobs to True upon init.
                    raise PemError("Invalid base64 encoding")
                # Else just continue the loop
class PemWriter(object):
    """A class for writing PEM blobs."""
    def __init__(self, fileobj, marker):
        """Create a writer.
        When used as a context manager, the underlying file object is closed
        upon exit.
        Args:
            fileobj: the file object to write to. Must be open for writing AND
                reading, and must be positioned at the writing position. Rather
                than initializing directly from a file object, it is recommended
                to use the from_file() constructor.
            marker: the marker to use in headers.
        """
        self.__f = fileobj
        # Header/footer lines are precomputed once from the marker.
        self.__header = _START_TEMPLATE % marker
        self.__footer = _END_TEMPLATE % marker
    def close(self):
        """Close the underlying file object."""
        self.__f.close()
    def __enter__(self):
        return self
    def __exit__(self, unused_type, unused_value, traceback):
        self.close()
    @classmethod
    def from_file(cls, filename, marker, append=False):
        """Construct a writer for writing to a file.
        Caller is responsible for closing the writer afterwards.
        Args:
            filename: the file to write to.
            marker: the marker to use in headers/footers.
            append: if True, file will be opened in append mode.
        Returns:
            A PemWriter object.
        Raises:
            IOError: the file could not be opened.
        """
        # Both modes allow reading too, which write() needs for its
        # newline check; "w+" truncates, "a+" preserves existing content.
        mode = "a+" if append else "w+"
        f = open(filename, mode)
        if append:
            # Move explicitly to end-of-file before writing.
            f.seek(0, 2)
        return cls(f, marker)
    def write(self, blob, check_newline=True):
        """Write a single PEM blob.
        Args:
            blob: a binary blob.
            check_newline: if True, check whether the current position is at
                the beginning of a new line and add a newline if not.
        Raises:
            IOError: the file could not be written to.
        """
        # Header must start on a new line, so we try to be helpful and add one
        # if it's missing.
        # Note that a file open'ed in a+ mode will report its current reading
        # (rather than writing) position - we deem it the caller's
        # responsibility to seek to the write position. Failing that, the worst
        # that can happen is either we fail to heal it, or add an extra newline.
        if check_newline:
            if self.__f.tell() != 0:
                # Step back one character and peek: if the previous byte is
                # not a newline, emit one so the header starts a fresh line.
                self.__f.seek(-1, 1)
                if self.__f.read(1) != "\n":
                    self.__f.write("\n")
        self.__f.write(self.__header)
        pem_blob = base64.b64encode(blob)
        # Wrap the base64 payload at 64 characters per line, the
        # conventional PEM line length.
        for i in range(0, len(pem_blob), 64):
            self.__f.write("\n")
            self.__f.write(pem_blob[i:i+64])
        self.__f.write("\n")
        self.__f.write(self.__footer)
        self.__f.write("\n")
    def write_blocks(self, blobs):
        """Write PEM blobs.
        Args:
            blobs: an iterable of binary blobs.
        Raises:
            IOError: the file could not be written to.
        """
        # Only the first block needs the newline check; each written block
        # ends with a newline of its own.
        check_newline = True
        for b in blobs:
            self.write(b, check_newline=check_newline)
            check_newline = False
    @classmethod
    def pem_string(cls, blob, marker):
        """Convert a binary blob to a PEM string.
        Args:
            blob: a single binary blob.
            marker: the marker to use in headers/footers.
        Returns:
            the PEM string for the blob.
        """
        stringio = StringIO.StringIO()
        with cls(stringio, marker) as writer:
            writer.write(blob)
        return stringio.getvalue()
    @classmethod
    def blocks_to_pem_string(cls, blobs, marker):
        """Convert binary blobs to a PEM string.
        Args:
            blobs: an iterable of binary blobs.
            marker: the marker to use in headers/footers.
        Returns:
            a string of concatenated PEM blobs.
        """
        stringio = StringIO.StringIO()
        with cls(stringio, marker) as writer:
            writer.write_blocks(blobs)
        return stringio.getvalue()
def from_pem(pem_string, markers):
    """Decode the first valid PEM blob found in a string.

    Content before and after the first blob with recognized markers is
    ignored.

    Args:
        pem_string: the PEM string.
        markers: a single marker string or an iterable containing all
            accepted markers, such as CERTIFICATE, RSA PUBLIC KEY,
            PUBLIC KEY, etc.

    Returns:
        A (raw_string, marker) tuple containing the decoded blob and the
        marker used to detect the blob.

    Raises:
        PemError: a PEM block was invalid or no valid PEM block was found.
    """
    reader = PemReader.from_string(pem_string, markers,
                                   skip_invalid_blobs=False)
    with reader:
        return next(iter(reader))
def from_pem_file(pem_file, markers):
    """Decode the first valid PEM blob found in a file.

    Content before and after the first blob with recognized markers is
    ignored.

    Args:
        pem_file: the PEM file.
        markers: a single marker string or an iterable containing all
            accepted markers, such as CERTIFICATE, RSA PUBLIC KEY,
            PUBLIC KEY, etc.

    Returns:
        A (raw_string, marker) tuple containing the decoded blob and the
        marker used to detect the blob.

    Raises:
        PemError: a PEM block was invalid or no valid PEM block was found.
        IOError: the file could not be read.
    """
    reader = PemReader.from_file(pem_file, markers,
                                 skip_invalid_blobs=False)
    with reader:
        return next(iter(reader))
def pem_blocks(pem_string, markers, skip_invalid_blobs=True):
    """Iterate over the PEM blobs contained in a string.

    Args:
        pem_string: the PEM string.
        markers: a single marker string or an iterable containing all
            accepted markers, such as CERTIFICATE, RSA PUBLIC KEY,
            PUBLIC KEY, etc.
        skip_invalid_blobs: if False, invalid PEM blobs cause a PemError.
            If True, invalid blobs are skipped. In non-skip mode, an
            immediate StopIteration before any valid blocks are found also
            causes a PemError exception.

    Yields:
        (raw_string, marker) tuples containing the decoded blob and the
        marker used to detect the blob.

    Raises:
        PemError: a PEM block was invalid.
    """
    reader = PemReader.from_string(pem_string, markers,
                                   skip_invalid_blobs=skip_invalid_blobs)
    with reader:
        for decoded_block in reader:
            yield decoded_block
def pem_blocks_from_file(pem_file, markers, skip_invalid_blobs=True):
    """Iterate over the PEM blobs contained in a file.

    Args:
        pem_file: the PEM file.
        markers: a single marker string or an iterable containing all
            accepted markers, such as CERTIFICATE, RSA PUBLIC KEY,
            PUBLIC KEY, etc.
        skip_invalid_blobs: if False, invalid PEM blobs cause a PemError.
            If True, invalid blobs are skipped. In non-skip mode, an
            immediate StopIteration before any valid blocks are found also
            causes a PemError exception.

    Yields:
        (raw_string, marker) tuples containing the decoded blob and the
        marker used to detect the blob.

    Raises:
        PemError: a PEM block was invalid.
    """
    reader = PemReader.from_file(pem_file, markers,
                                 skip_invalid_blobs=skip_invalid_blobs)
    with reader:
        for decoded_block in reader:
            yield decoded_block
def to_pem(blob, marker):
    """Encode a single binary blob as a PEM-formatted string.

    Args:
        blob: a binary blob.
        marker: the marker to use, e.g., CERTIFICATE.

    Returns:
        the PEM string for the blob.
    """
    # Delegate to the writer so formatting lives in exactly one place.
    return PemWriter.pem_string(blob, marker)
def blocks_to_pem(blobs, marker):
    """Encode binary blobs as one string of concatenated PEM blocks.

    Args:
        blobs: an iterable of binary blobs.
        marker: the marker to use, e.g., CERTIFICATE.

    Returns:
        the concatenated PEM string.
    """
    # Delegate to the writer so formatting lives in exactly one place.
    return PemWriter.blocks_to_pem_string(blobs, marker)
def to_pem_file(blob, filename, marker):
    """Encode a single binary blob as PEM and write it to a file.

    Args:
        blob: a binary blob.
        filename: the file to write to.
        marker: the marker to use, e.g., CERTIFICATE.

    Raises:
        IOError: the file could not be written to.
    """
    writer = PemWriter.from_file(filename, marker)
    with writer:
        writer.write(blob)
def blocks_to_pem_file(blobs, filename, marker):
    """Encode binary blobs as PEM and write them to a file.

    All blobs share one and the same marker type.

    Args:
        blobs: an iterable of binary blobs.
        filename: the file to write to.
        marker: the marker to use, e.g., CERTIFICATE.

    Raises:
        IOError: the file could not be written to.
    """
    writer = PemWriter.from_file(filename, marker)
    with writer:
        writer.write_blocks(blobs)
| apache-2.0 |
VishvajitP/django-extensions | django_extensions/mongodb/fields/encrypted.py | 28 | 2037 | """
Encrypted fields from Django Extensions, modified for use with mongoDB
"""
from django import forms
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from mongoengine.base import BaseField
try:
from keyczar import keyczar
except ImportError:
raise ImportError('Using an encrypted field requires the Keyczar module. You can obtain Keyczar from http://www.keyczar.org/.')
class BaseEncryptedField(BaseField):
    """MongoDB field base that transparently encrypts its value.

    Stored values carry the ``enc_str:::`` prefix so that already
    encrypted data can be recognized and is never encrypted twice.
    """
    prefix = 'enc_str:::'

    def __init__(self, *args, **kwargs):
        if not hasattr(settings, 'ENCRYPTED_FIELD_KEYS_DIR'):
            raise ImproperlyConfigured('You must set settings.ENCRYPTED_FIELD_KEYS_DIR to your Keyczar keys directory.')
        self.crypt = keyczar.Crypter.Read(settings.ENCRYPTED_FIELD_KEYS_DIR)
        super(BaseEncryptedField, self).__init__(*args, **kwargs)

    def to_python(self, value):
        # Values read back from the database carry the prefix; anything
        # else is returned untouched.
        if not value.startswith(self.prefix):
            return value
        return self.crypt.Decrypt(value[len(self.prefix):])

    def get_db_prep_value(self, value):
        # Encrypt only once: a value that already carries the prefix is
        # passed through unchanged.
        if value.startswith(self.prefix):
            return value
        return self.prefix + self.crypt.Encrypt(value)
class EncryptedTextField(BaseEncryptedField):
    """Encrypted text field rendered with a textarea widget."""

    def get_internal_type(self):
        return 'StringField'

    def formfield(self, **kwargs):
        options = dict(widget=forms.Textarea)
        options.update(kwargs)
        return super(EncryptedTextField, self).formfield(**options)
class EncryptedCharField(BaseEncryptedField):
    """Encrypted character field with a length limit."""

    def __init__(self, max_length=None, *args, **kwargs):
        # Reserve extra room for the storage prefix so the prefixed
        # ciphertext still fits within the declared limit.
        if max_length:
            max_length += len(self.prefix)
        super(EncryptedCharField, self).__init__(max_length=max_length, *args, **kwargs)

    def get_internal_type(self):
        return 'StringField'

    def formfield(self, **kwargs):
        options = dict(max_length=self.max_length)
        options.update(kwargs)
        return super(EncryptedCharField, self).formfield(**options)
| mit |
40223211/2015cd_midterm- | static/Brython3.1.1-20150328-091302/Lib/urllib/parse.py | 735 | 35170 | """Parse (absolute and relative) URLs.
urlparse module is based upon the following RFC specifications.
RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding
and L. Masinter, January 2005.
RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter
and L.Masinter, December 1999.
RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T.
Berners-Lee, R. Fielding, and L. Masinter, August 1998.
RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zawinski, July 1998.
RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June
1995.
RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M.
McCahill, December 1994
RFC 3986 is considered the current standard and any future changes to
urlparse module should conform with it. The urlparse module is
currently not entirely compliant with this RFC due to defacto
scenarios for parsing, and for backward compatibility purposes, some
parsing quirks from older RFCs are retained. The testcases in
test_urlparse.py provides a good indicator of parsing behavior.
"""
import re
import sys
import collections
__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
"urlsplit", "urlunsplit", "urlencode", "parse_qs",
"parse_qsl", "quote", "quote_plus", "quote_from_bytes",
"unquote", "unquote_plus", "unquote_to_bytes"]
# A classification of schemes ('' means apply by default)
uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap',
'wais', 'file', 'https', 'shttp', 'mms',
'prospero', 'rtsp', 'rtspu', '', 'sftp',
'svn', 'svn+ssh']
uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet',
'imap', 'wais', 'file', 'mms', 'https', 'shttp',
'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '',
'svn', 'svn+ssh', 'sftp', 'nfs', 'git', 'git+ssh']
uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap',
'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
'mms', '', 'sftp', 'tel']
# These are not actually used anymore, but should stay for backwards
# compatibility. (They are undocumented, but have a public-looking name.)
non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']
uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms',
'gopher', 'rtsp', 'rtspu', 'sip', 'sips', '']
uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news',
'nntp', 'wais', 'https', 'shttp', 'snews',
'file', 'prospero', '']
# Characters valid in scheme names
scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789'
'+-.')
# XXX: Consider replacing with functools.lru_cache
MAX_CACHE_SIZE = 20
_parse_cache = {}
def clear_cache():
    """Clear the parse cache and the quoters cache."""
    # Flush both module-level caches in place; urlsplit() calls this
    # automatically when _parse_cache grows past MAX_CACHE_SIZE.
    _parse_cache.clear()
    _safe_quoters.clear()
# Helpers for bytes handling
# For 3.2, we deliberately require applications that
# handle improperly quoted URLs to do their own
# decoding and encoding. If valid use cases are
# presented, we may relax this by using latin-1
# decoding internally for 3.3
_implicit_encoding = 'ascii'
_implicit_errors = 'strict'
def _noop(obj):
return obj
def _encode_result(obj, encoding=_implicit_encoding,
errors=_implicit_errors):
return obj.encode(encoding, errors)
def _decode_args(args, encoding=_implicit_encoding,
errors=_implicit_errors):
return tuple(x.decode(encoding, errors) if x else '' for x in args)
def _coerce_args(*args):
    """Normalize mixed str/bytes arguments for the parsing helpers.

    Returns the (possibly decoded) arguments followed by a result-coercion
    callable: a no-op for str inputs, or an encoder that converts str
    results back to bytes for bytes inputs.
    """
    want_str = isinstance(args[0], str)
    # Reject mixed argument types, but let empty strings through so the
    # common "scheme=''" default keeps working for bytes callers.
    for extra in args[1:]:
        if extra and isinstance(extra, str) != want_str:
            raise TypeError("Cannot mix str and non-str arguments")
    if want_str:
        return args + (_noop,)
    return _decode_args(args) + (_encode_result,)
# Result objects are more helpful than simple tuples
class _ResultMixinStr(object):
    """Standard approach to encoding parsed results from str to bytes"""
    __slots__ = ()

    def encode(self, encoding='ascii', errors='strict'):
        # Encode each tuple component and rebuild via the bytes twin class;
        # _encoded_counterpart is wired up by _fix_result_transcoding().
        return self._encoded_counterpart(*(x.encode(encoding, errors) for x in self))
class _ResultMixinBytes(object):
    """Standard approach to decoding parsed results from bytes to str"""
    __slots__ = ()

    def decode(self, encoding='ascii', errors='strict'):
        # Decode each tuple component and rebuild via the str twin class;
        # _decoded_counterpart is wired up by _fix_result_transcoding().
        return self._decoded_counterpart(*(x.decode(encoding, errors) for x in self))
class _NetlocResultMixinBase(object):
    """Shared methods for the parsed result objects containing a netloc element"""
    __slots__ = ()

    @property
    def username(self):
        # _userinfo is (username, password), provided by the subclasses.
        return self._userinfo[0]

    @property
    def password(self):
        return self._userinfo[1]

    @property
    def hostname(self):
        hostname = self._hostinfo[0]
        if not hostname:
            # Empty netloc: report no hostname at all.
            hostname = None
        elif hostname is not None:
            # Hostnames are case-insensitive; normalize to lowercase.
            hostname = hostname.lower()
        return hostname

    @property
    def port(self):
        port = self._hostinfo[1]
        if port is not None:
            port = int(port, 10)
            # Return None on an illegal port
            if not ( 0 <= port <= 65535):
                return None
        return port
class _NetlocResultMixinStr(_NetlocResultMixinBase, _ResultMixinStr):
    __slots__ = ()

    @property
    def _userinfo(self):
        """(username, password) parsed from the netloc; either may be None."""
        netloc = self.netloc
        # rpartition: user info ends at the *last* '@' in the netloc.
        userinfo, have_info, hostinfo = netloc.rpartition('@')
        if have_info:
            username, have_password, password = userinfo.partition(':')
            if not have_password:
                password = None
        else:
            username = password = None
        return username, password

    @property
    def _hostinfo(self):
        """(hostname, port-string) parsed from the netloc; port may be None."""
        netloc = self.netloc
        _, _, hostinfo = netloc.rpartition('@')
        # '[' introduces a bracketed IPv6 literal whose ':' chars must not
        # be confused with the port separator.
        _, have_open_br, bracketed = hostinfo.partition('[')
        if have_open_br:
            hostname, _, port = bracketed.partition(']')
            _, have_port, port = port.partition(':')
        else:
            hostname, have_port, port = hostinfo.partition(':')
        if not have_port:
            port = None
        return hostname, port
class _NetlocResultMixinBytes(_NetlocResultMixinBase, _ResultMixinBytes):
    # Byte-string twin of _NetlocResultMixinStr: identical logic with
    # bytes separators instead of str ones.
    __slots__ = ()

    @property
    def _userinfo(self):
        """(username, password) parsed from the netloc; either may be None."""
        netloc = self.netloc
        userinfo, have_info, hostinfo = netloc.rpartition(b'@')
        if have_info:
            username, have_password, password = userinfo.partition(b':')
            if not have_password:
                password = None
        else:
            username = password = None
        return username, password

    @property
    def _hostinfo(self):
        """(hostname, port-bytes) parsed from the netloc; port may be None."""
        netloc = self.netloc
        _, _, hostinfo = netloc.rpartition(b'@')
        # Brackets delimit IPv6 literals, whose b':' chars are not a port.
        _, have_open_br, bracketed = hostinfo.partition(b'[')
        if have_open_br:
            hostname, _, port = bracketed.partition(b']')
            _, have_port, port = port.partition(b':')
        else:
            hostname, have_port, port = hostinfo.partition(b':')
        if not have_port:
            port = None
        return hostname, port
from collections import namedtuple
_DefragResultBase = namedtuple('DefragResult', 'url fragment')
_SplitResultBase = namedtuple('SplitResult', 'scheme netloc path query fragment')
_ParseResultBase = namedtuple('ParseResult', 'scheme netloc path params query fragment')
# For backwards compatibility, alias _NetlocResultMixinStr
# ResultBase is no longer part of the documented API, but it is
# retained since deprecating it isn't worth the hassle
ResultBase = _NetlocResultMixinStr
# Structured result objects for string data
class DefragResult(_DefragResultBase, _ResultMixinStr):
    """str result type returned by urldefrag()."""
    __slots__ = ()

    def geturl(self):
        # Re-attach the fragment only when one was present originally.
        if not self.fragment:
            return self.url
        return '#'.join((self.url, self.fragment))
class SplitResult(_SplitResultBase, _NetlocResultMixinStr):
    """str result type returned by urlsplit()."""
    __slots__ = ()

    def geturl(self):
        # Reassemble an equivalent URL from the 5 components.
        return urlunsplit(self)
class ParseResult(_ParseResultBase, _NetlocResultMixinStr):
    """str result type returned by urlparse()."""
    __slots__ = ()

    def geturl(self):
        # Reassemble an equivalent URL from the 6 components.
        return urlunparse(self)
# Structured result objects for bytes data
class DefragResultBytes(_DefragResultBase, _ResultMixinBytes):
    """bytes result type returned by urldefrag()."""
    __slots__ = ()

    def geturl(self):
        # Re-attach the fragment only when one was present originally.
        if not self.fragment:
            return self.url
        return b'#'.join((self.url, self.fragment))
class SplitResultBytes(_SplitResultBase, _NetlocResultMixinBytes):
    """bytes result type returned by urlsplit()."""
    __slots__ = ()

    def geturl(self):
        # Reassemble an equivalent URL from the 5 components.
        return urlunsplit(self)
class ParseResultBytes(_ParseResultBase, _NetlocResultMixinBytes):
    """bytes result type returned by urlparse()."""
    __slots__ = ()

    def geturl(self):
        # Reassemble an equivalent URL from the 6 components.
        return urlunparse(self)
# Set up the encode/decode result pairs
def _fix_result_transcoding():
    # Wire each str result class to its bytes twin (and back) so that
    # .encode()/.decode() can rebuild the matching counterpart type.
    pairs = [
        (DefragResult, DefragResultBytes),
        (SplitResult, SplitResultBytes),
        (ParseResult, ParseResultBytes),
    ]
    for str_class, bytes_class in pairs:
        str_class._encoded_counterpart = bytes_class
        bytes_class._decoded_counterpart = str_class

_fix_result_transcoding()
del _fix_result_transcoding
def urlparse(url, scheme='', allow_fragments=True):
    """Parse a URL into 6 components:
    <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
    Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    url, scheme, _coerce_result = _coerce_args(url, scheme)
    scheme, netloc, url, query, fragment = urlsplit(url, scheme, allow_fragments)
    # Parameters (';...') are only split off for schemes that use them.
    if ';' in url and scheme in uses_params:
        url, params = _splitparams(url)
    else:
        params = ''
    return _coerce_result(
        ParseResult(scheme, netloc, url, params, query, fragment))
def _splitparams(url):
if '/' in url:
i = url.find(';', url.rfind('/'))
if i < 0:
return url, ''
else:
i = url.find(';')
return url[:i], url[i+1:]
def _splitnetloc(url, start=0):
delim = len(url) # position of end of domain part of url, default is end
for c in '/?#': # look for delimiters; the order is NOT important
wdelim = url.find(c, start) # find first of this delim
if wdelim >= 0: # if found
delim = min(delim, wdelim) # use earliest delim position
return url[start:delim], url[delim:] # return (domain, rest)
def urlsplit(url, scheme='', allow_fragments=True):
    """Parse a URL into 5 components:
    <scheme>://<netloc>/<path>?<query>#<fragment>
    Return a 5-tuple: (scheme, netloc, path, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    url, scheme, _coerce_result = _coerce_args(url, scheme)
    allow_fragments = bool(allow_fragments)
    # The cache key includes the argument *types* so str and bytes inputs
    # are cached separately (the cached value matches the input type).
    key = url, scheme, allow_fragments, type(url), type(scheme)
    cached = _parse_cache.get(key, None)
    if cached:
        return _coerce_result(cached)
    if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
        clear_cache()
    netloc = query = fragment = ''
    i = url.find(':')
    if i > 0:
        if url[:i] == 'http': # optimize the common case
            scheme = url[:i].lower()
            url = url[i+1:]
            if url[:2] == '//':
                netloc, url = _splitnetloc(url, 2)
                # Brackets may only appear (paired) around IPv6 literals.
                if (('[' in netloc and ']' not in netloc) or
                        (']' in netloc and '[' not in netloc)):
                    raise ValueError("Invalid IPv6 URL")
            if allow_fragments and '#' in url:
                url, fragment = url.split('#', 1)
            if '?' in url:
                url, query = url.split('?', 1)
            v = SplitResult(scheme, netloc, url, query, fragment)
            _parse_cache[key] = v
            return _coerce_result(v)
        # General case: accept the text before ':' as a scheme only if it
        # consists entirely of valid scheme characters.
        for c in url[:i]:
            if c not in scheme_chars:
                break
        else:
            # make sure "url" is not actually a port number (in which case
            # "scheme" is really part of the path)
            rest = url[i+1:]
            if not rest or any(c not in '0123456789' for c in rest):
                # not a port number
                scheme, url = url[:i].lower(), rest

    if url[:2] == '//':
        netloc, url = _splitnetloc(url, 2)
        if (('[' in netloc and ']' not in netloc) or
                (']' in netloc and '[' not in netloc)):
            raise ValueError("Invalid IPv6 URL")
    if allow_fragments and '#' in url:
        url, fragment = url.split('#', 1)
    if '?' in url:
        url, query = url.split('?', 1)
    v = SplitResult(scheme, netloc, url, query, fragment)
    _parse_cache[key] = v
    return _coerce_result(v)
def urlunparse(components):
    """Put a parsed URL back together again.  This may result in a
    slightly different, but equivalent URL, if the URL that was parsed
    originally had redundant delimiters, e.g. a ? with an empty query
    (the draft states that these are equivalent)."""
    scheme, netloc, url, params, query, fragment, _coerce_result = (
        _coerce_args(*components))
    if params:
        # Re-attach the path parameters before delegating to urlunsplit().
        url = "%s;%s" % (url, params)
    return _coerce_result(urlunsplit((scheme, netloc, url, query, fragment)))
def urlunsplit(components):
    """Combine the elements of a tuple as returned by urlsplit() into a
    complete URL as a string. The data argument can be any five-item iterable.
    This may result in a slightly different, but equivalent URL, if the URL that
    was parsed originally had unnecessary delimiters (for example, a ? with an
    empty query; the RFC states that these are equivalent)."""
    scheme, netloc, url, query, fragment, _coerce_result = (
        _coerce_args(*components))
    if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'):
        # A path following an authority component must start with '/'.
        if url and not url.startswith('/'):
            url = '/' + url
        url = '//' + (netloc or '') + url
    if scheme:
        url = ':'.join((scheme, url))
    if query:
        url = '?'.join((url, query))
    if fragment:
        url = '#'.join((url, fragment))
    return _coerce_result(url)
def urljoin(base, url, allow_fragments=True):
    """Join a base URL and a possibly relative URL to form an absolute
    interpretation of the latter."""
    if not base:
        return url
    if not url:
        return base
    base, url, _coerce_result = _coerce_args(base, url)
    bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
        urlparse(base, '', allow_fragments)
    scheme, netloc, path, params, query, fragment = \
        urlparse(url, bscheme, allow_fragments)
    # A different scheme (or one without relative semantics) means the
    # second URL stands on its own.
    if scheme != bscheme or scheme not in uses_relative:
        return _coerce_result(url)
    if scheme in uses_netloc:
        if netloc:
            return _coerce_result(urlunparse((scheme, netloc, path,
                                              params, query, fragment)))
        netloc = bnetloc
    # An absolute path replaces the base path entirely.
    if path[:1] == '/':
        return _coerce_result(urlunparse((scheme, netloc, path,
                                          params, query, fragment)))
    if not path and not params:
        # Empty relative reference: keep the base path/params, and the
        # base query too unless the reference supplies one.
        path = bpath
        params = bparams
        if not query:
            query = bquery
        return _coerce_result(urlunparse((scheme, netloc, path,
                                          params, query, fragment)))
    # Merge the relative path onto the base path's directory, then
    # resolve '.' and '..' segments in place.
    segments = bpath.split('/')[:-1] + path.split('/')
    # XXX The stuff below is bogus in various ways...
    if segments[-1] == '.':
        segments[-1] = ''
    while '.' in segments:
        segments.remove('.')
    while 1:
        i = 1
        n = len(segments) - 1
        while i < n:
            # Collapse a '<segment>/..' pair, unless the left side is
            # itself '..' (or the leading empty root segment).
            if (segments[i] == '..'
                and segments[i-1] not in ('', '..')):
                del segments[i-1:i+1]
                break
            i = i+1
        else:
            break
    if segments == ['', '..']:
        segments[-1] = ''
    elif len(segments) >= 2 and segments[-1] == '..':
        segments[-2:] = ['']
    return _coerce_result(urlunparse((scheme, netloc, '/'.join(segments),
                                      params, query, fragment)))
def urldefrag(url):
    """Removes any existing fragment from URL.

    Returns a tuple of the defragmented URL and the fragment.  If
    the URL contained no fragments, the second element is the
    empty string.
    """
    url, _coerce_result = _coerce_args(url)
    if '#' not in url:
        # Nothing to strip: the URL is already fragment-free.
        return _coerce_result(DefragResult(url, ''))
    s, n, p, a, q, frag = urlparse(url)
    # Rebuild the URL with an empty fragment component.
    defrag = urlunparse((s, n, p, a, q, ''))
    return _coerce_result(DefragResult(defrag, frag))
_hexdig = '0123456789ABCDEFabcdef'
# Map every two-hex-digit bytes sequence (e.g. b'2F') to its byte value.
_hextobyte = {(a + b).encode(): bytes([int(a + b, 16)])
              for a in _hexdig for b in _hexdig}


def unquote_to_bytes(string):
    """unquote_to_bytes('abc%20def') -> b'abc def'."""
    # Note: strings are encoded as UTF-8. This is only an issue if it contains
    # unescaped non-ASCII characters, which URIs should not.
    if not string:
        # Raise AttributeError early for non string-like objects.
        string.split
        return b''
    if isinstance(string, str):
        string = string.encode('utf-8')
    chunks = string.split(b'%')
    if len(chunks) == 1:
        # No percent signs at all: nothing to decode.
        return string
    decoded = [chunks[0]]
    for chunk in chunks[1:]:
        escape = _hextobyte.get(chunk[:2])
        if escape is None:
            # Invalid escape: keep the '%' and the chunk verbatim.
            decoded.append(b'%')
            decoded.append(chunk)
        else:
            decoded.append(escape)
            decoded.append(chunk[2:])
    return b''.join(decoded)
# Matches maximal runs of ASCII characters; only these can hold %xx escapes.
_asciire = re.compile('([\x00-\x7f]+)')


def unquote(string, encoding='utf-8', errors='replace'):
    """Replace %xx escapes by their single-character equivalent. The optional
    encoding and errors parameters specify how to decode percent-encoded
    sequences into Unicode characters, as accepted by the bytes.decode()
    method.
    By default, percent-encoded sequences are decoded with UTF-8, and invalid
    sequences are replaced by a placeholder character.

    unquote('abc%20def') -> 'abc def'.
    """
    if '%' not in string:
        # Raise AttributeError early for non string-like objects.
        string.split
        return string
    if encoding is None:
        encoding = 'utf-8'
    if errors is None:
        errors = 'replace'
    # split() with a capturing group yields alternating non-ASCII / ASCII
    # pieces; only the odd-indexed (ASCII) pieces need unquoting.
    pieces = _asciire.split(string)
    out = [pieces[0]]
    for idx in range(1, len(pieces), 2):
        out.append(unquote_to_bytes(pieces[idx]).decode(encoding, errors))
        out.append(pieces[idx + 1])
    return ''.join(out)
def parse_qs(qs, keep_blank_values=False, strict_parsing=False,
             encoding='utf-8', errors='replace'):
    """Parse a query given as a string argument.

    Arguments:

    qs: percent-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        percent-encoded queries should be treated as blank strings.
        A true value indicates that blanks should be retained as
        blank strings.  The default false value indicates that
        blank values are to be ignored and treated as if they were
        not included.

    strict_parsing: flag indicating what to do with parsing errors.
        If false (the default), errors are silently ignored.
        If true, errors raise a ValueError exception.

    encoding and errors: specify how to decode percent-encoded sequences
        into Unicode characters, as accepted by the bytes.decode() method.

    Returns a dict mapping each field name to a list of its values.
    """
    parsed_result = {}
    for name, value in parse_qsl(qs, keep_blank_values, strict_parsing,
                                 encoding=encoding, errors=errors):
        # Repeated field names accumulate into a list of values.
        parsed_result.setdefault(name, []).append(value)
    return parsed_result
def parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
              encoding='utf-8', errors='replace'):
    """Parse a query given as a string argument.

    Arguments:

    qs: percent-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        percent-encoded queries should be treated as blank strings.  A
        true value indicates that blanks should be retained as blank
        strings.  The default false value indicates that blank values
        are to be ignored and treated as if they were  not included.

    strict_parsing: flag indicating what to do with parsing errors. If
        false (the default), errors are silently ignored. If true,
        errors raise a ValueError exception.

    encoding and errors: specify how to decode percent-encoded sequences
        into Unicode characters, as accepted by the bytes.decode() method.

    Returns a list, as G-d intended.
    """
    qs, _coerce_result = _coerce_args(qs)
    # Both '&' and ';' are accepted as pair separators.
    pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
    r = []
    for name_value in pairs:
        if not name_value and not strict_parsing:
            continue
        nv = name_value.split('=', 1)
        if len(nv) != 2:
            if strict_parsing:
                raise ValueError("bad query field: %r" % (name_value,))
            # Handle case of a control-name with no equal sign
            if keep_blank_values:
                nv.append('')
            else:
                continue
        if len(nv[1]) or keep_blank_values:
            # '+' means space in form encoding; convert before unquoting
            # so '%2B' still decodes to a literal '+'.
            name = nv[0].replace('+', ' ')
            name = unquote(name, encoding=encoding, errors=errors)
            name = _coerce_result(name)
            value = nv[1].replace('+', ' ')
            value = unquote(value, encoding=encoding, errors=errors)
            value = _coerce_result(value)
            r.append((name, value))
    return r
def unquote_plus(string, encoding='utf-8', errors='replace'):
    """Like unquote(), but also replace plus signs by spaces, as required for
    unquoting HTML form values.

    unquote_plus('%7e/abc+def') -> '~/abc def'
    """
    # '+' means space in form-encoded data; convert before %xx decoding.
    return unquote(string.replace('+', ' '), encoding, errors)
_ALWAYS_SAFE = frozenset(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
b'abcdefghijklmnopqrstuvwxyz'
b'0123456789'
b'_.-')
_ALWAYS_SAFE_BYTES = bytes(_ALWAYS_SAFE)
_safe_quoters = {}
class Quoter(collections.defaultdict):
"""A mapping from bytes (in range(0,256)) to strings.
String values are percent-encoded byte values, unless the key < 128, and
in the "safe" set (either the specified safe set, or default set).
"""
# Keeps a cache internally, using defaultdict, for efficiency (lookups
# of cached keys don't call Python code at all).
def __init__(self, safe):
"""safe: bytes object."""
self.safe = _ALWAYS_SAFE.union(safe)
def __repr__(self):
# Without this, will just display as a defaultdict
return "<Quoter %r>" % dict(self)
def __missing__(self, b):
# Handle a cache miss. Store quoted string in cache and return.
res = chr(b) if b in self.safe else '%{:02X}'.format(b)
self[b] = res
return res
def quote(string, safe='/', encoding=None, errors=None):
    """quote('abc def') -> 'abc%20def'

    Percent-encode the reserved/unsafe characters of a URL component.
    By default '/' is left unquoted, which suits quoting the *path*
    section of a URL where slashes are genuine separators.

    string and safe may be either str or bytes objects. encoding must
    not be specified if string is a str.

    The optional encoding and errors parameters specify how to deal with
    non-ASCII characters, as accepted by the str.encode method.
    By default, encoding='utf-8' (characters are encoded with UTF-8), and
    errors='strict' (unsupported characters raise a UnicodeEncodeError).
    """
    if isinstance(string, str):
        if not string:
            return string
        # str input: encode to bytes first, defaulting to strict UTF-8.
        if encoding is None:
            encoding = 'utf-8'
        if errors is None:
            errors = 'strict'
        string = string.encode(encoding, errors)
    elif encoding is not None:
        raise TypeError("quote() doesn't support 'encoding' for bytes")
    elif errors is not None:
        raise TypeError("quote() doesn't support 'errors' for bytes")
    return quote_from_bytes(string, safe)
def quote_plus(string, safe='', encoding=None, errors=None):
    """Like quote(), but also replace ' ' with '+', as required for quoting
    HTML form values. Plus signs in the original string are escaped unless
    they are included in safe. It also does not have safe default to '/'.
    """
    # Fast path: with no spaces anywhere, plain quote() already suffices.
    if ((isinstance(string, str) and ' ' not in string) or
        (isinstance(string, bytes) and b' ' not in string)):
        return quote(string, safe, encoding, errors)
    # Keep spaces unquoted by adding them to 'safe', then turn each one
    # into '+' in the quoted result.
    space = ' ' if isinstance(safe, str) else b' '
    quoted = quote(string, safe + space, encoding, errors)
    return quoted.replace(' ', '+')
def quote_from_bytes(bs, safe='/'):
    """Like quote(), but accepts a bytes object rather than a str, and does
    not perform string-to-bytes encoding.  It always returns an ASCII string.
    quote_from_bytes(b'abc def\x3f') -> 'abc%20def%3f'
    """
    if not isinstance(bs, (bytes, bytearray)):
        raise TypeError("quote_from_bytes() expected bytes")
    if not bs:
        return ''
    if isinstance(safe, str):
        # Normalize 'safe' by converting to bytes and removing non-ASCII chars
        safe = safe.encode('ascii', 'ignore')
    else:
        safe = bytes(c for c in safe if c < 128)
    # If every byte is already safe, no quoting pass is needed.
    if not bs.rstrip(_ALWAYS_SAFE_BYTES + safe):
        return bs.decode()
    try:
        quoter = _safe_quoters[safe]
    except KeyError:
        # Cache one bound Quoter.__getitem__ per distinct 'safe' set.
        _safe_quoters[safe] = quoter = Quoter(safe).__getitem__
    return ''.join(map(quoter, bs))
def urlencode(query, doseq=False, safe='', encoding=None, errors=None):
    """Encode a sequence of two-element tuples or dictionary into a URL query string.

    If any values in the query arg are sequences and doseq is true, each
    sequence element is converted to a separate parameter.

    If the query arg is a sequence of two-element tuples, the order of the
    parameters in the output will match the order of parameters in the
    input.

    The query arg may be either a string or a bytes type. When query arg is a
    string, the safe, encoding and error parameters are sent the quote_plus for
    encoding.
    """
    if hasattr(query, "items"):
        # Mapping: iterate its (key, value) pairs.
        query = query.items()
    else:
        # It's a bother at times that strings and string-like objects are
        # sequences.
        try:
            # non-sequence items should not work with len()
            # non-empty strings will fail this
            if len(query) and not isinstance(query[0], tuple):
                raise TypeError
            # Zero-length sequences of all types will get here and succeed,
            # but that's a minor nit.  Since the original implementation
            # allowed empty dicts that type of behavior probably should be
            # preserved for consistency
        except TypeError:
            ty, va, tb = sys.exc_info()
            raise TypeError("not a valid non-string sequence "
                            "or mapping object").with_traceback(tb)

    l = []
    if not doseq:
        # Simple mode: each (k, v) contributes exactly one 'k=v' pair;
        # non-bytes values are stringified first.
        for k, v in query:
            if isinstance(k, bytes):
                k = quote_plus(k, safe)
            else:
                k = quote_plus(str(k), safe, encoding, errors)
            if isinstance(v, bytes):
                v = quote_plus(v, safe)
            else:
                v = quote_plus(str(v), safe, encoding, errors)
            l.append(k + '=' + v)
    else:
        # doseq mode: bytes/str values still give one pair, but any other
        # sequence value contributes one 'k=elt' pair per element.
        for k, v in query:
            if isinstance(k, bytes):
                k = quote_plus(k, safe)
            else:
                k = quote_plus(str(k), safe, encoding, errors)
            if isinstance(v, bytes):
                v = quote_plus(v, safe)
                l.append(k + '=' + v)
            elif isinstance(v, str):
                v = quote_plus(v, safe, encoding, errors)
                l.append(k + '=' + v)
            else:
                try:
                    # Is this a sufficient test for sequence-ness?
                    x = len(v)
                except TypeError:
                    # not a sequence
                    v = quote_plus(str(v), safe, encoding, errors)
                    l.append(k + '=' + v)
                else:
                    # loop over the sequence
                    for elt in v:
                        if isinstance(elt, bytes):
                            elt = quote_plus(elt, safe)
                        else:
                            elt = quote_plus(str(elt), safe, encoding, errors)
                        l.append(k + '=' + elt)
    return '&'.join(l)
# Utilities to parse URLs (most of these return None for missing parts):
# unwrap('<URL:type://host/path>') --> 'type://host/path'
# splittype('type:opaquestring') --> 'type', 'opaquestring'
# splithost('//host[:port]/path') --> 'host[:port]', '/path'
# splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'
# splitpasswd('user:passwd') -> 'user', 'passwd'
# splitport('host:port') --> 'host', 'port'
# splitquery('/path?query') --> '/path', 'query'
# splittag('/path#tag') --> '/path', 'tag'
# splitattr('/path;attr1=value1;attr2=value2;...') ->
# '/path', ['attr1=value1', 'attr2=value2', ...]
# splitvalue('attr=value') --> 'attr', 'value'
# urllib.parse.unquote('abc%20def') -> 'abc def'
# quote('abc def') -> 'abc%20def')
def to_bytes(url):
    """to_bytes(u"URL") --> 'URL'."""
    # Most URL schemes require ASCII. If that changes, the conversion
    # can be relaxed.
    # XXX get rid of to_bytes()
    if not isinstance(url, str):
        return url
    try:
        # Round-trip through ASCII purely to validate the characters.
        return url.encode("ASCII").decode()
    except UnicodeError:
        raise UnicodeError("URL " + repr(url) +
                           " contains non-ASCII characters")
def unwrap(url):
    """unwrap('<URL:type://host/path>') --> 'type://host/path'."""
    url = str(url).strip()
    # Strip one level of '<...>' wrapping, then an optional 'URL:' tag.
    if url.startswith('<') and url.endswith('>') and len(url) >= 2:
        url = url[1:-1].strip()
        if url[:4] == 'URL:':
            url = url[4:].strip()
    return url
_typeprog = None
def splittype(url):
    """splittype('type:opaquestring') --> 'type', 'opaquestring'.

    The scheme is returned lowercased; (None, url) when no scheme is found.
    """
    global _typeprog
    if _typeprog is None:
        # Compile lazily and cache at module level.  're' is already
        # imported at the top of this module; the old function-local
        # 'import re' was redundant.
        _typeprog = re.compile('^([^/:]+):')

    match = _typeprog.match(url)
    if match:
        scheme = match.group(1)
        return scheme.lower(), url[len(scheme) + 1:]
    return None, url
_hostprog = None
def splithost(url):
    """splithost('//host[:port]/path') --> 'host[:port]', '/path'.

    Returns (None, url) when the URL does not start with '//'.
    """
    global _hostprog
    if _hostprog is None:
        # 're' is imported at module level; the old local import was
        # redundant.
        _hostprog = re.compile('^//([^/?]*)(.*)$')

    match = _hostprog.match(url)
    if match:
        host_port = match.group(1)
        path = match.group(2)
        # Normalize a non-empty path to start with '/'.
        if path and not path.startswith('/'):
            path = '/' + path
        return host_port, path
    return None, url
_userprog = None
def splituser(host):
    """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
    global _userprog
    if _userprog is None:
        # Greedy first group splits on the *last* '@'; 're' is imported
        # at module level, so no local import is needed.
        _userprog = re.compile('^(.*)@(.*)$')

    match = _userprog.match(host)
    if match:
        return match.group(1, 2)
    return None, host
_passwdprog = None
def splitpasswd(user):
    """splitpasswd('user:passwd') -> 'user', 'passwd'.

    Returns (user, None) when no ':' is present.  re.S lets the password
    part contain newlines.
    """
    global _passwdprog
    if _passwdprog is None:
        # 're' is imported at module level; the old local import was
        # redundant.
        _passwdprog = re.compile('^([^:]*):(.*)$', re.S)

    match = _passwdprog.match(user)
    if match:
        return match.group(1, 2)
    return user, None
# splittag('/path#tag') --> '/path', 'tag'
_portprog = None
def splitport(host):
    """splitport('host:port') --> 'host', 'port'.

    The port must be all digits; otherwise (host, None) is returned
    with the input unchanged.
    """
    global _portprog
    if _portprog is None:
        # 're' is imported at module level; the old local import was
        # redundant.
        _portprog = re.compile('^(.*):([0-9]+)$')

    match = _portprog.match(host)
    if match:
        return match.group(1, 2)
    return host, None
_nportprog = None
def splitnport(host, defport=-1):
    """Split host and port, returning numeric port.

    Return given default port if no ':' found; defaults to -1.
    Return numerical port if a valid number are found after ':'.
    Return None if ':' but not a valid number."""
    global _nportprog
    if _nportprog is None:
        # 're' is imported at module level; the old local import was
        # redundant.
        _nportprog = re.compile('^(.*):(.*)$')

    match = _nportprog.match(host)
    if not match:
        return host, defport
    host, port = match.group(1, 2)
    try:
        if not port:
            raise ValueError("no digits")
        nport = int(port)
    except ValueError:
        # ':' present but the port is empty or non-numeric.
        nport = None
    return host, nport
_queryprog = None
def splitquery(url):
    """splitquery('/path?query') --> '/path', 'query'.

    Splits on the *last* '?'; returns (url, None) when absent.
    """
    global _queryprog
    if _queryprog is None:
        # Raw string: '\?' in a plain literal is an invalid escape
        # sequence (DeprecationWarning since Python 3.6).  're' is
        # imported at module level, so no local import is needed.
        _queryprog = re.compile(r'^(.*)\?([^?]*)$')

    match = _queryprog.match(url)
    if match:
        return match.group(1, 2)
    return url, None
_tagprog = None
def splittag(url):
    """splittag('/path#tag') --> '/path', 'tag'.

    Splits on the *last* '#'; returns (url, None) when absent.
    """
    global _tagprog
    if _tagprog is None:
        # 're' is imported at module level; the old local import was
        # redundant.
        _tagprog = re.compile('^(.*)#([^#]*)$')

    match = _tagprog.match(url)
    if match:
        return match.group(1, 2)
    return url, None
def splitattr(url):
    """splitattr('/path;attr1=value1;attr2=value2;...') ->
        '/path', ['attr1=value1', 'attr2=value2', ...]."""
    # First ';'-separated piece is the path; the rest are attributes.
    path, *attributes = url.split(';')
    return path, attributes
_valueprog = None
def splitvalue(attr):
    """splitvalue('attr=value') --> 'attr', 'value'.

    Splits on the *first* '='; returns (attr, None) when absent.
    """
    global _valueprog
    if _valueprog is None:
        # 're' is imported at module level; the old local import was
        # redundant.
        _valueprog = re.compile('^([^=]*)=(.*)$')

    match = _valueprog.match(attr)
    if match:
        return match.group(1, 2)
    return attr, None
| gpl-3.0 |
GaetanCambier/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tmz.py | 30 | 1229 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class TMZIE(InfoExtractor):
    """Extractor for TMZ video pages (tmz.com/videos/<id>/)."""
    # Video id is the path component after /videos/.
    _VALID_URL = r'https?://(?:www\.)?tmz\.com/videos/(?P<id>[^/]+)/?'
    _TEST = {
        'url': 'http://www.tmz.com/videos/0_okj015ty/',
        'md5': '791204e3bf790b1426cb2db0706184c0',
        'info_dict': {
            'id': '0_okj015ty',
            'url': 'http://tmz.vo.llnwd.net/o28/2014-03/13/0_okj015ty_0_rt8ro3si_2.mp4',
            'ext': 'mp4',
            'title': 'Kim Kardashian\'s Boobs Unlock a Mystery!',
            'description': 'Did Kim Kardasain try to one-up Khloe by one-upping Kylie???  Or is she just showing off her amazing boobs?',
            'thumbnail': r're:http://cdnbakmi\.kaltura\.com/.*thumbnail.*',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The page exposes all metadata via <meta> / OpenGraph tags;
        # only VideoURL is mandatory (fatal=True).
        return {
            'id': video_id,
            'url': self._html_search_meta('VideoURL', webpage, fatal=True),
            'title': self._og_search_title(webpage),
            'description': self._og_search_description(webpage),
            'thumbnail': self._html_search_meta('ThumbURL', webpage),
        }
| gpl-3.0 |
jbonofre/incubator-beam | sdks/python/apache_beam/runners/direct/direct_runner_test.py | 7 | 1440 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import threading
import unittest
import apache_beam as beam
from apache_beam.testing import test_pipeline
class DirectPipelineResultTest(unittest.TestCase):
  """Tests for the direct runner's pipeline result object."""

  def test_waiting_on_result_stops_executor_threads(self):
    # Snapshot the threads alive before the pipeline runs, so any thread
    # still present afterwards must have been started by the runner.
    pre_test_threads = set(t.ident for t in threading.enumerate())

    pipeline = test_pipeline.TestPipeline()
    _ = (pipeline | beam.Create([{'foo': 'bar'}]))
    result = pipeline.run()
    result.wait_until_finish()

    # After wait_until_finish() returns, all executor threads must have
    # exited; no new thread ids may remain.
    post_test_threads = set(t.ident for t in threading.enumerate())
    new_threads = post_test_threads - pre_test_threads
    self.assertEqual(len(new_threads), 0)
| apache-2.0 |
emedinaa/contentbox | third_party/unidecode/x0c8.py | 4 | 4991 | data = (
# ASCII transliterations for the Unicode block U+C800..U+C8FF (precomposed
# Hangul syllables whose initial consonant romanizes as 'j').  The table is
# indexed by the code point's low byte (the trailing hex comment on each
# entry); the medial vowel changes every 28 entries and the 28 final
# consonants cycle within each group.
'jeo', # 0x00
'jeog', # 0x01
'jeogg', # 0x02
'jeogs', # 0x03
'jeon', # 0x04
'jeonj', # 0x05
'jeonh', # 0x06
'jeod', # 0x07
'jeol', # 0x08
'jeolg', # 0x09
'jeolm', # 0x0a
'jeolb', # 0x0b
'jeols', # 0x0c
'jeolt', # 0x0d
'jeolp', # 0x0e
'jeolh', # 0x0f
'jeom', # 0x10
'jeob', # 0x11
'jeobs', # 0x12
'jeos', # 0x13
'jeoss', # 0x14
'jeong', # 0x15
'jeoj', # 0x16
'jeoc', # 0x17
'jeok', # 0x18
'jeot', # 0x19
'jeop', # 0x1a
'jeoh', # 0x1b
'je', # 0x1c
'jeg', # 0x1d
'jegg', # 0x1e
'jegs', # 0x1f
'jen', # 0x20
'jenj', # 0x21
'jenh', # 0x22
'jed', # 0x23
'jel', # 0x24
'jelg', # 0x25
'jelm', # 0x26
'jelb', # 0x27
'jels', # 0x28
'jelt', # 0x29
'jelp', # 0x2a
'jelh', # 0x2b
'jem', # 0x2c
'jeb', # 0x2d
'jebs', # 0x2e
'jes', # 0x2f
'jess', # 0x30
'jeng', # 0x31
'jej', # 0x32
'jec', # 0x33
'jek', # 0x34
'jet', # 0x35
'jep', # 0x36
'jeh', # 0x37
'jyeo', # 0x38
'jyeog', # 0x39
'jyeogg', # 0x3a
'jyeogs', # 0x3b
'jyeon', # 0x3c
'jyeonj', # 0x3d
'jyeonh', # 0x3e
'jyeod', # 0x3f
'jyeol', # 0x40
'jyeolg', # 0x41
'jyeolm', # 0x42
'jyeolb', # 0x43
'jyeols', # 0x44
'jyeolt', # 0x45
'jyeolp', # 0x46
'jyeolh', # 0x47
'jyeom', # 0x48
'jyeob', # 0x49
'jyeobs', # 0x4a
'jyeos', # 0x4b
'jyeoss', # 0x4c
'jyeong', # 0x4d
'jyeoj', # 0x4e
'jyeoc', # 0x4f
'jyeok', # 0x50
'jyeot', # 0x51
'jyeop', # 0x52
'jyeoh', # 0x53
'jye', # 0x54
'jyeg', # 0x55
'jyegg', # 0x56
'jyegs', # 0x57
'jyen', # 0x58
'jyenj', # 0x59
'jyenh', # 0x5a
'jyed', # 0x5b
'jyel', # 0x5c
'jyelg', # 0x5d
'jyelm', # 0x5e
'jyelb', # 0x5f
'jyels', # 0x60
'jyelt', # 0x61
'jyelp', # 0x62
'jyelh', # 0x63
'jyem', # 0x64
'jyeb', # 0x65
'jyebs', # 0x66
'jyes', # 0x67
'jyess', # 0x68
'jyeng', # 0x69
'jyej', # 0x6a
'jyec', # 0x6b
'jyek', # 0x6c
'jyet', # 0x6d
'jyep', # 0x6e
'jyeh', # 0x6f
'jo', # 0x70
'jog', # 0x71
'jogg', # 0x72
'jogs', # 0x73
'jon', # 0x74
'jonj', # 0x75
'jonh', # 0x76
'jod', # 0x77
'jol', # 0x78
'jolg', # 0x79
'jolm', # 0x7a
'jolb', # 0x7b
'jols', # 0x7c
'jolt', # 0x7d
'jolp', # 0x7e
'jolh', # 0x7f
'jom', # 0x80
'job', # 0x81
'jobs', # 0x82
'jos', # 0x83
'joss', # 0x84
'jong', # 0x85
'joj', # 0x86
'joc', # 0x87
'jok', # 0x88
'jot', # 0x89
'jop', # 0x8a
'joh', # 0x8b
'jwa', # 0x8c
'jwag', # 0x8d
'jwagg', # 0x8e
'jwags', # 0x8f
'jwan', # 0x90
'jwanj', # 0x91
'jwanh', # 0x92
'jwad', # 0x93
'jwal', # 0x94
'jwalg', # 0x95
'jwalm', # 0x96
'jwalb', # 0x97
'jwals', # 0x98
'jwalt', # 0x99
'jwalp', # 0x9a
'jwalh', # 0x9b
'jwam', # 0x9c
'jwab', # 0x9d
'jwabs', # 0x9e
'jwas', # 0x9f
'jwass', # 0xa0
'jwang', # 0xa1
'jwaj', # 0xa2
'jwac', # 0xa3
'jwak', # 0xa4
'jwat', # 0xa5
'jwap', # 0xa6
'jwah', # 0xa7
'jwae', # 0xa8
'jwaeg', # 0xa9
'jwaegg', # 0xaa
'jwaegs', # 0xab
'jwaen', # 0xac
'jwaenj', # 0xad
'jwaenh', # 0xae
'jwaed', # 0xaf
'jwael', # 0xb0
'jwaelg', # 0xb1
'jwaelm', # 0xb2
'jwaelb', # 0xb3
'jwaels', # 0xb4
'jwaelt', # 0xb5
'jwaelp', # 0xb6
'jwaelh', # 0xb7
'jwaem', # 0xb8
'jwaeb', # 0xb9
'jwaebs', # 0xba
'jwaes', # 0xbb
'jwaess', # 0xbc
'jwaeng', # 0xbd
'jwaej', # 0xbe
'jwaec', # 0xbf
'jwaek', # 0xc0
'jwaet', # 0xc1
'jwaep', # 0xc2
'jwaeh', # 0xc3
'joe', # 0xc4
'joeg', # 0xc5
'joegg', # 0xc6
'joegs', # 0xc7
'joen', # 0xc8
'joenj', # 0xc9
'joenh', # 0xca
'joed', # 0xcb
'joel', # 0xcc
'joelg', # 0xcd
'joelm', # 0xce
'joelb', # 0xcf
'joels', # 0xd0
'joelt', # 0xd1
'joelp', # 0xd2
'joelh', # 0xd3
'joem', # 0xd4
'joeb', # 0xd5
'joebs', # 0xd6
'joes', # 0xd7
'joess', # 0xd8
'joeng', # 0xd9
'joej', # 0xda
'joec', # 0xdb
'joek', # 0xdc
'joet', # 0xdd
'joep', # 0xde
'joeh', # 0xdf
'jyo', # 0xe0
'jyog', # 0xe1
'jyogg', # 0xe2
'jyogs', # 0xe3
'jyon', # 0xe4
'jyonj', # 0xe5
'jyonh', # 0xe6
'jyod', # 0xe7
'jyol', # 0xe8
'jyolg', # 0xe9
'jyolm', # 0xea
'jyolb', # 0xeb
'jyols', # 0xec
'jyolt', # 0xed
'jyolp', # 0xee
'jyolh', # 0xef
'jyom', # 0xf0
'jyob', # 0xf1
'jyobs', # 0xf2
'jyos', # 0xf3
'jyoss', # 0xf4
'jyong', # 0xf5
'jyoj', # 0xf6
'jyoc', # 0xf7
'jyok', # 0xf8
'jyot', # 0xf9
'jyop', # 0xfa
'jyoh', # 0xfb
'ju', # 0xfc
'jug', # 0xfd
'jugg', # 0xfe
'jugs', # 0xff
)
| apache-2.0 |
zpzgone/paramiko | paramiko/primes.py | 39 | 4949 | # Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Utility functions for dealing with primes.
"""
import os
from paramiko import util
from paramiko.py3compat import byte_mask, long
from paramiko.ssh_exception import SSHException
from paramiko.common import *
def _roll_random(n):
    """Return a uniformly distributed random number in the range [0, n)."""
    bits = util.bit_length(n - 1)
    nbytes = (bits + 7) // 8
    top_mask = pow(2, bits % 8) - 1

    # Rejection sampling: draw just enough random bytes to represent n - 1,
    # mask off the unused high bits of the first byte, and retry whenever
    # the value lands at or above n.  In the worst case (n - 1 a power of
    # two) each draw still fits with probability slightly above one half,
    # so the expected number of iterations is below two.
    while True:
        raw = os.urandom(nbytes)
        if top_mask > 0:
            raw = byte_mask(raw[0], top_mask) + raw[1:]
        candidate = util.inflate_long(raw, 1)
        if candidate < n:
            return candidate
class ModulusPack (object):
    """
    convenience object for holding the contents of the /etc/ssh/moduli file,
    on systems that have such a file.
    """
    def __init__(self):
        # pack is a hash of: bits -> [ (generator, modulus) ... ]
        self.pack = {}
        # list of (modulus, reason) pairs rejected by _parse_modulus
        self.discarded = []

    def _parse_modulus(self, line):
        """Parse one non-comment line of a moduli file and file the prime
        under its bit length in ``self.pack``, or record it in
        ``self.discarded`` with a reason if it fails the quality checks.
        """
        # moduli(5) line layout: timestamp, type, tests, trials, size,
        # generator, modulus (hex).
        timestamp, mod_type, tests, tries, size, generator, modulus = line.split()
        mod_type = int(mod_type)
        tests = int(tests)
        tries = int(tries)
        size = int(size)
        generator = int(generator)
        modulus = long(modulus, 16)

        # weed out primes that aren't at least:
        # type 2 (meets basic structural requirements)
        # test 4 (more than just a small-prime sieve)
        # tries < 100 if test & 4 (at least 100 tries of miller-rabin)
        if (mod_type < 2) or (tests < 4) or ((tests & 4) and (tests < 8) and (tries < 100)):
            self.discarded.append((modulus, 'does not meet basic requirements'))
            return
        if generator == 0:
            # 0 means "unspecified"; 2 is the conventional DH generator.
            generator = 2

        # there's a bug in the ssh "moduli" file (yeah, i know: shock! dismay!
        # call cnn!) where it understates the bit lengths of these primes by 1.
        # this is okay.
        bl = util.bit_length(modulus)
        if (bl != size) and (bl != size + 1):
            self.discarded.append((modulus, 'incorrectly reported bit length %d' % size))
            return
        if bl not in self.pack:
            self.pack[bl] = []
        self.pack[bl].append((generator, modulus))

    def read_file(self, filename):
        """
        Load and parse a moduli file, replacing any previous contents.

        Blank lines, '#' comments, and unparseable lines are silently
        skipped (best-effort: one bad line must not poison the whole file).

        :raises IOError: passed from any file operations that fail.
        """
        self.pack = {}
        with open(filename, 'r') as f:
            for line in f:
                line = line.strip()
                if (len(line) == 0) or (line[0] == '#'):
                    continue
                try:
                    self._parse_modulus(line)
                except:
                    continue

    def get_modulus(self, min, prefer, max):
        """Return a random ``(generator, modulus)`` pair whose bit size best
        satisfies the peer's requested (min, prefer, max) range.

        NOTE: the parameter names shadow the ``min``/``max`` builtins; they
        are kept for interface compatibility.

        :raises SSHException: if no moduli have been loaded at all.
        """
        bitsizes = sorted(self.pack.keys())
        if len(bitsizes) == 0:
            raise SSHException('no moduli available')
        good = -1
        # find nearest bitsize >= preferred
        for b in bitsizes:
            if (b >= prefer) and (b < max) and (b < good or good == -1):
                good = b
        # if that failed, find greatest bitsize >= min
        if good == -1:
            for b in bitsizes:
                if (b >= min) and (b < max) and (b > good):
                    good = b
        if good == -1:
            # their entire (min, max) range has no intersection with our range.
            # if their range is below ours, pick the smallest. otherwise pick
            # the largest. it'll be out of their range requirement either way,
            # but we'll be sending them the closest one we have.
            good = bitsizes[0]
            if min > good:
                good = bitsizes[-1]

        # now pick a random modulus of this bitsize
        n = _roll_random(len(self.pack[good]))
        return self.pack[good][n]
| lgpl-2.1 |
watonyweng/horizon | openstack_dashboard/static_settings.py | 9 | 6269 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This file contains configuration for the locations of all the static file
libraries, such as JavaScript and CSS libraries. Packagers for individual
distributions can edit or replace this file, in order to change the paths
to match their distribution's standards.
"""
import os
import xstatic.main
import xstatic.pkg.angular
import xstatic.pkg.angular_bootstrap
import xstatic.pkg.angular_gettext
import xstatic.pkg.angular_lrdragndrop
import xstatic.pkg.angular_smart_table
import xstatic.pkg.bootstrap_datepicker
import xstatic.pkg.bootstrap_scss
import xstatic.pkg.d3
import xstatic.pkg.font_awesome
import xstatic.pkg.hogan
import xstatic.pkg.jasmine
import xstatic.pkg.jquery
import xstatic.pkg.jquery_migrate
import xstatic.pkg.jquery_quicksearch
import xstatic.pkg.jquery_tablesorter
import xstatic.pkg.jquery_ui
import xstatic.pkg.jsencrypt
import xstatic.pkg.magic_search
import xstatic.pkg.qunit
import xstatic.pkg.rickshaw
import xstatic.pkg.spin
import xstatic.pkg.termjs
from horizon.utils import file_discovery
def get_staticfiles_dirs(webroot='/'):
    """Return the (prefix, directory) pairs for Django's STATICFILES_DIRS
    covering every xstatic package bundled with Horizon, rooted at
    ``webroot``.
    """
    def base_dir(package):
        # Resolve the on-disk base directory of an xstatic package.
        return xstatic.main.XStatic(package, root_url=webroot).base_dir

    # (URL prefix, xstatic package) pairs, in the order they are served.
    packages = [
        ('horizon/lib/angular', xstatic.pkg.angular),
        ('horizon/lib/angular', xstatic.pkg.angular_bootstrap),
        ('horizon/lib/angular', xstatic.pkg.angular_gettext),
        ('horizon/lib/angular', xstatic.pkg.angular_lrdragndrop),
        ('horizon/lib/angular', xstatic.pkg.angular_smart_table),
        ('horizon/lib/bootstrap_datepicker', xstatic.pkg.bootstrap_datepicker),
        ('bootstrap', xstatic.pkg.bootstrap_scss),
        ('horizon/lib', xstatic.pkg.d3),
        ('horizon/lib', xstatic.pkg.hogan),
        ('horizon/lib/font-awesome', xstatic.pkg.font_awesome),
        ('horizon/lib/jasmine', xstatic.pkg.jasmine),
        ('horizon/lib/jquery', xstatic.pkg.jquery),
        ('horizon/lib/jquery', xstatic.pkg.jquery_migrate),
        ('horizon/lib/jquery', xstatic.pkg.jquery_quicksearch),
        ('horizon/lib/jquery', xstatic.pkg.jquery_tablesorter),
        ('horizon/lib/jsencrypt', xstatic.pkg.jsencrypt),
        ('horizon/lib/magic_search', xstatic.pkg.magic_search),
        ('horizon/lib/qunit', xstatic.pkg.qunit),
        ('horizon/lib', xstatic.pkg.rickshaw),
        ('horizon/lib', xstatic.pkg.spin),
        ('horizon/lib', xstatic.pkg.termjs),
    ]
    STATICFILES_DIRS = [(prefix, base_dir(pkg)) for prefix, pkg in packages]

    # jquery-ui 1.10.x ships an extra 'ui' directory that newer releases
    # dropped; adjust the prefix so the resulting URLs stay identical.
    jquery_ui = xstatic.main.XStatic(xstatic.pkg.jquery_ui, root_url=webroot)
    if jquery_ui.version.startswith('1.10.'):
        # The 1.10.x versions already contain the 'ui' directory.
        STATICFILES_DIRS.append(
            ('horizon/lib/jquery-ui', jquery_ui.base_dir))
    else:
        # Newer versions dropped the directory, add it to keep the path the
        # same.
        STATICFILES_DIRS.append(
            ('horizon/lib/jquery-ui/ui', jquery_ui.base_dir))

    return STATICFILES_DIRS
def find_static_files(ROOT_PATH, HORIZON_CONFIG):
    """Discover Horizon and dashboard static assets and record them in
    ``HORIZON_CONFIG`` via ``file_discovery.populate_horizon_config``.
    """
    # Paths handed to populate_horizon_config must end in '/' so that the
    # resulting file paths are relative (no leading '/').
    horizon_static = os.path.join(ROOT_PATH, '..', 'horizon', 'static/')
    file_discovery.populate_horizon_config(HORIZON_CONFIG, horizon_static)

    # Filter out non-angular javascript code and lib files found above.
    HORIZON_CONFIG['js_files'] = [
        f for f in HORIZON_CONFIG['js_files']
        if not f.startswith('horizon/')
    ]

    dashboard_static = os.path.join(ROOT_PATH, 'static/')
    file_discovery.populate_horizon_config(
        HORIZON_CONFIG,
        dashboard_static,
        sub_path='app/'
    )
| apache-2.0 |
jneves/OctoPrint | tests/util/test_comm_helpers.py | 18 | 5466 | # coding=utf-8
from __future__ import absolute_import
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
import unittest
from ddt import ddt, data, unpack
@ddt
class TestCommHelpers(unittest.TestCase):
    """Unit tests for the helper functions in ``octoprint.util.comm``."""

    @data(
        # (input line, expected result after comment stripping)
        ("M117 Test", "M117 Test"),
        ("M117 Test ; foo", "M117 Test "),
        ("M117 Test \\; foo", "M117 Test \\; foo"),
        ("M117 Test \\\\; foo", "M117 Test \\\\"),
        ("M117 Test \\\\\\; foo", "M117 Test \\\\\\; foo"),
        ("; foo", "")
    )
    @unpack
    def test_strip_comment(self, input, expected):
        """Semicolon comments are stripped unless escaped by a backslash."""
        from octoprint.util import comm
        self.assertEquals(expected, comm.strip_comment(input))

    @data(
        # (input line, offsets, current tool, expected processed line;
        # blank/whitespace-only input is expected to map to None)
        ("M117 Test", None, None, "M117 Test"),
        ("", None, None, None),
        (" \t \r \n", None, None, None),
        ("M117 Test", dict(), 0, "M117 Test")
    )
    @unpack
    def test_process_gcode_line(self, input, offsets, current_tool, expected):
        """Lines pass through unchanged; empty input becomes None."""
        from octoprint.util import comm
        self.assertEquals(expected, comm.process_gcode_line(input, offsets=offsets, current_tool=current_tool))

    @data(
        # (input line, offsets dict, current tool, expected temperature
        # after offset application, or None if the line must come back
        # unchanged)
        ("M104 S200", None, None, None),
        ("M117 Test", dict(), None, None),
        ("M104 T0", dict(), None, None),
        ("M104 S220", dict(tool0=10, tool1=20, bed=30), 0, 230.0),
        ("M104 T1 S220", dict(tool0=10, tool1=20, bed=30), 0, 240.0),
        ("M104 S220", dict(tool0=10, tool1=20, bed=30), 1, 240.0),
        ("M140 S100", dict(tool0=10, tool1=20, bed=30), 1, 130.0),
        ("M190 S100", dict(tool0=10, tool1=20, bed=30), 1, 130.0),
        ("M109 S220", dict(tool0=10, tool1=20, bed=30), 0, 230.0),
        ("M109 S220", dict(), 0, None),
        ("M140 S100", dict(), 0, None),
        ("M104 S220", dict(tool0=0), 0, None),
        ("M104 S220", dict(tool0=20), None, None),
        ("M104 S0", dict(tool0=20), 0, None)
    )
    @unpack
    def test_apply_temperature_offsets(self, input, offsets, current_tool, expected):
        """Temperature offsets are applied to the S parameter of temperature
        commands; everything outside the S value must be left untouched."""
        from octoprint.util import comm
        actual = comm.apply_temperature_offsets(input, offsets, current_tool=current_tool)

        if expected is None:
            self.assertEquals(input, actual)
        else:
            import re
            # Extract the (possibly fractional) S value from the result.
            match = re.search("S(\d+(\.\d+)?)", actual)
            if not match:
                self.fail("No temperature found")
            temperature = float(match.group(1))
            self.assertEquals(expected, temperature)
            # Only the S value may differ; prefix and suffix must match.
            self.assertEquals(input[:match.start(1)], actual[:match.start(1)])
            self.assertEquals(input[match.end(1):], actual[match.end(1):])

    def test_convert_pause_triggers(self):
        """Trigger regexes of the same type are OR-combined into one
        matcher per type; entries missing a regex or with an unknown type
        are dropped."""
        configured_triggers = [
            dict(regex="pause1", type="enable"),
            dict(regex="pause2", type="enable"),
            dict(regex="resume", type="disable"),
            dict(regex="toggle", type="toggle"),
            dict(type="enable"),
            dict(regex="regex"),
            dict(regex="regex", type="unknown")
        ]

        from octoprint.util import comm
        trigger_matchers = comm.convert_pause_triggers(configured_triggers)

        self.assertIsNotNone(trigger_matchers)

        self.assertIn("enable", trigger_matchers)
        self.assertEquals("(pause1)|(pause2)", trigger_matchers["enable"].pattern)

        self.assertIn("disable", trigger_matchers)
        self.assertEquals("(resume)", trigger_matchers["disable"].pattern)

        self.assertIn("toggle", trigger_matchers)
        self.assertEquals("(toggle)", trigger_matchers["toggle"].pattern)

        self.assertNotIn("unknown", trigger_matchers)

    def test_convert_feedback_controls(self):
        """Feedback controls are keyed by the md5 of their regex; a regex
        used by several controls collects all of their templates, and the
        combined matcher ORs every pattern in named groups."""
        def md5sum(input):
            # Same hashing scheme the comm module uses for control keys.
            import hashlib
            m = hashlib.md5()
            m.update(input)
            return m.hexdigest()

        temp_regex = "T:((\d*\.)\d+)"
        temp_template = "Temp: {}"
        temp2_template = "Temperature: {}"
        temp_key = md5sum(temp_regex)
        temp_template_key = md5sum(temp_template)
        temp2_template_key = md5sum(temp2_template)

        x_regex = "X:(?P<x>\d+)"
        x_template = "X: {x}"
        x_key = md5sum(x_regex)
        x_template_key = md5sum(x_template)

        configured_controls = [
            dict(key=temp_key, regex=temp_regex, template=temp_template, template_key=temp_template_key),
            dict(command="M117 Hello World", name="Test"),
            dict(children=[
                dict(key=x_key, regex=x_regex, template=x_template, template_key=x_template_key),
                dict(key=temp_key, regex=temp_regex, template=temp2_template, template_key=temp2_template_key)
            ])
        ]

        from octoprint.util import comm
        controls, matcher = comm.convert_feedback_controls(configured_controls)

        self.assertEquals(2, len(controls))

        # temp_regex is used twice, so we should have two templates for it
        self.assertIn(temp_key, controls)
        temp = controls[temp_key]

        self.assertIsNotNone(temp["matcher"])
        self.assertEquals(temp_regex, temp["matcher"].pattern)
        self.assertEquals(temp_regex, temp["pattern"])

        self.assertEquals(2, len(temp["templates"]))
        self.assertIn(temp_template_key, temp["templates"])
        self.assertEquals(temp_template, temp["templates"][temp_template_key])
        self.assertIn(temp2_template_key, temp["templates"])
        self.assertEquals(temp2_template, temp["templates"][temp2_template_key])

        # x_regex is used once, so we should have only one template for it
        self.assertIn(x_key, controls)
        x = controls[x_key]

        self.assertIsNotNone(x["matcher"])
        self.assertEquals(x_regex, x["matcher"].pattern)
        self.assertEquals(x_regex, x["pattern"])

        self.assertEquals(1, len(x["templates"]))
        self.assertIn(x_template_key, x["templates"])
        self.assertEquals(x_template, x["templates"][x_template_key])

        self.assertEquals("(?P<group{temp_key}>{temp_regex})|(?P<group{x_key}>{x_regex})".format(**locals()), matcher.pattern)
| agpl-3.0 |
romain-li/edx-platform | lms/urls.py | 1 | 33080 | """
URLs for LMS
"""
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.views.generic.base import RedirectView
from ratelimitbackend import admin
from django.conf.urls.static import static
from courseware.views.views import CourseTabView, EnrollStaffView, StaticCourseTabView
from config_models.views import ConfigurationModelCurrentAPIView
from courseware.views.index import CoursewareIndex
from django_comment_common.models import ForumsConfig
from openedx.core.djangoapps.auth_exchange.views import LoginWithAccessTokenView
from openedx.core.djangoapps.catalog.models import CatalogIntegration
from openedx.core.djangoapps.programs.models import ProgramsApiConfig
from openedx.core.djangoapps.self_paced.models import SelfPacedConfiguration
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.features.enterprise_support.api import enterprise_enabled
# Uncomment the next two lines to enable the admin:
if settings.DEBUG or settings.FEATURES.get('ENABLE_DJANGO_ADMIN_SITE'):
admin.autodiscover()
# Use urlpatterns formatted as within the Django docs with first parameter "stuck" to the open parenthesis
urlpatterns = (
'',
url(r'^$', 'branding.views.index', name="root"), # Main marketing page, or redirect to courseware
url(r'', include('student.urls')),
# TODO: Move lms specific student views out of common code
url(r'^dashboard$', 'student.views.dashboard', name="dashboard"),
url(r'^change_enrollment$', 'student.views.change_enrollment', name='change_enrollment'),
# Event tracking endpoints
url(r'', include('track.urls')),
# Performance endpoints
url(r'', include('openedx.core.djangoapps.performance.urls')),
# Static template view endpoints like blog, faq, etc.
url(r'', include('static_template_view.urls')),
url(r'^heartbeat$', include('openedx.core.djangoapps.heartbeat.urls')),
# Note: these are older versions of the User API that will eventually be
# subsumed by api/user listed below.
url(r'^user_api/', include('openedx.core.djangoapps.user_api.legacy_urls')),
url(r'^notifier_api/', include('notifier_api.urls')),
url(r'^i18n/', include('django.conf.urls.i18n')),
# Feedback Form endpoint
url(r'^submit_feedback$', 'util.views.submit_feedback'),
# Enrollment API RESTful endpoints
url(r'^api/enrollment/v1/', include('enrollment.urls')),
# Courseware search endpoints
url(r'^search/', include('search.urls')),
# Course content API
url(r'^api/course_structure/', include('course_structure_api.urls', namespace='course_structure_api')),
# Course API
url(r'^api/courses/', include('course_api.urls')),
# User API endpoints
url(r'^api/user/', include('openedx.core.djangoapps.user_api.urls')),
# Bookmarks API endpoints
url(r'^api/bookmarks/', include('openedx.core.djangoapps.bookmarks.urls')),
# Profile Images API endpoints
url(r'^api/profile_images/', include('openedx.core.djangoapps.profile_images.urls')),
# Video Abstraction Layer used to allow video teams to manage video assets
# independently of courseware. https://github.com/edx/edx-val
url(r'^api/val/v0/', include('edxval.urls')),
url(r'^api/commerce/', include('commerce.api.urls', namespace='commerce_api')),
url(r'^api/credit/', include('openedx.core.djangoapps.credit.urls', app_name="credit", namespace='credit')),
url(r'^rss_proxy/', include('rss_proxy.urls', namespace='rss_proxy')),
url(r'^api/organizations/', include('organizations.urls', namespace='organizations')),
# Update session view
url(
r'^lang_pref/session_language',
'openedx.core.djangoapps.lang_pref.views.update_session_language',
name='session_language'
),
# Multiple course modes and identity verification
# TODO Namespace these!
url(r'^course_modes/', include('course_modes.urls')),
url(r'^verify_student/', include('verify_student.urls')),
# URLs for managing dark launches of languages
url(r'^update_lang/', include('openedx.core.djangoapps.dark_lang.urls', namespace='dark_lang')),
# URLs for API access management
url(r'^api-admin/', include('openedx.core.djangoapps.api_admin.urls', namespace='api_admin')),
)
urlpatterns += (
url(r'^dashboard/', include('learner_dashboard.urls')),
)
# TODO: This needs to move to a separate urls.py once the student_account and
# student views below find a home together
if settings.FEATURES["ENABLE_COMBINED_LOGIN_REGISTRATION"]:
# Backwards compatibility with old URL structure, but serve the new views
urlpatterns += (
url(r'^login$', 'student_account.views.login_and_registration_form',
{'initial_mode': 'login'}, name="signin_user"),
url(r'^register$', 'student_account.views.login_and_registration_form',
{'initial_mode': 'register'}, name="register_user"),
)
else:
# Serve the old views
urlpatterns += (
url(r'^login$', 'student.views.signin_user', name="signin_user"),
url(r'^register$', 'student.views.register_user', name="register_user"),
)
if settings.FEATURES["ENABLE_MOBILE_REST_API"]:
urlpatterns += (
url(r'^api/mobile/v0.5/', include('mobile_api.urls')),
)
if settings.FEATURES["ENABLE_OPENBADGES"]:
urlpatterns += (
url(r'^api/badges/v1/', include('badges.api.urls', app_name="badges", namespace="badges_api")),
)
js_info_dict = {
'domain': 'djangojs',
# We need to explicitly include external Django apps that are not in LOCALE_PATHS.
'packages': ('openassessment',),
}
# sysadmin dashboard, to see what courses are loaded, to delete & load courses
if settings.FEATURES["ENABLE_SYSADMIN_DASHBOARD"]:
urlpatterns += (
url(r'^sysadmin/', include('dashboard.sysadmin_urls')),
)
urlpatterns += (
url(r'^support/', include('support.urls', app_name="support", namespace='support')),
)
# Favicon
favicon_path = configuration_helpers.get_value('favicon_path', settings.FAVICON_PATH) # pylint: disable=invalid-name
urlpatterns += (url(
r'^favicon\.ico$',
RedirectView.as_view(url=settings.STATIC_URL + favicon_path, permanent=True)
),)
# Multicourse wiki (Note: wiki urls must be above the courseware ones because of
# the custom tab catch-all)
if settings.WIKI_ENABLED:
from wiki.urls import get_pattern as wiki_pattern
from django_notify.urls import get_pattern as notify_pattern
urlpatterns += (
# First we include views from course_wiki that we use to override the default views.
# They come first in the urlpatterns so they get resolved first
url('^wiki/create-root/$', 'course_wiki.views.root_create', name='root_create'),
url(r'^wiki/', include(wiki_pattern())),
url(r'^notify/', include(notify_pattern())),
# These urls are for viewing the wiki in the context of a course. They should
# never be returned by a reverse() so they come after the other url patterns
url(r'^courses/{}/course_wiki/?$'.format(settings.COURSE_ID_PATTERN),
'course_wiki.views.course_wiki_redirect', name="course_wiki"),
url(r'^courses/{}/wiki/'.format(settings.COURSE_KEY_REGEX), include(wiki_pattern())),
)
# Registration-code lookup endpoints, grouped so they can be mounted under a
# course-scoped URL prefix.  Both views live in the instructor dashboard's
# registration_codes module.
COURSE_URLS = patterns(
    '',
    url(
        r'^look_up_registration_code$',
        'lms.djangoapps.instructor.views.registration_codes.look_up_registration_code',
        name='look_up_registration_code',
    ),
    url(
        r'^registration_code_details$',
        'lms.djangoapps.instructor.views.registration_codes.registration_code_details',
        name='registration_code_details',
    ),
)
urlpatterns += (
# jump_to URLs for direct access to a location in the course
url(
r'^courses/{}/jump_to/(?P<location>.*)$'.format(
settings.COURSE_ID_PATTERN,
),
'courseware.views.views.jump_to',
name='jump_to',
),
url(
r'^courses/{}/jump_to_id/(?P<module_id>.*)$'.format(
settings.COURSE_ID_PATTERN,
),
'courseware.views.views.jump_to_id',
name='jump_to_id',
),
# xblock Handler APIs
url(
r'^courses/{course_key}/xblock/{usage_key}/handler/(?P<handler>[^/]*)(?:/(?P<suffix>.*))?$'.format(
course_key=settings.COURSE_ID_PATTERN,
usage_key=settings.USAGE_ID_PATTERN,
),
'courseware.module_render.handle_xblock_callback',
name='xblock_handler',
),
url(
r'^courses/{course_key}/xblock/{usage_key}/handler_noauth/(?P<handler>[^/]*)(?:/(?P<suffix>.*))?$'.format(
course_key=settings.COURSE_ID_PATTERN,
usage_key=settings.USAGE_ID_PATTERN,
),
'courseware.module_render.handle_xblock_callback_noauth',
name='xblock_handler_noauth',
),
# xblock View API
# (unpublished) API that returns JSON with the HTML fragment and related resources
# for the xBlock's requested view.
url(
r'^courses/{course_key}/xblock/{usage_key}/view/(?P<view_name>[^/]*)$'.format(
course_key=settings.COURSE_ID_PATTERN,
usage_key=settings.USAGE_ID_PATTERN,
),
'courseware.module_render.xblock_view',
name='xblock_view',
),
# xblock Rendering View URL
# URL to provide an HTML view of an xBlock. The view type (e.g., student_view) is
# passed as a "view" parameter to the URL.
# Note: This is not an API. Compare this with the xblock_view API above.
url(
r'^xblock/{usage_key_string}$'.format(usage_key_string=settings.USAGE_KEY_PATTERN),
'courseware.views.views.render_xblock',
name='render_xblock',
),
# xblock Resource URL
url(
r'xblock/resource/(?P<block_type>[^/]+)/(?P<uri>.*)$',
'openedx.core.djangoapps.common_views.xblock.xblock_resource',
name='xblock_resource_url',
),
url(
r'^courses/{}/xqueue/(?P<userid>[^/]*)/(?P<mod_id>.*?)/(?P<dispatch>[^/]*)$'.format(
settings.COURSE_ID_PATTERN,
),
'courseware.module_render.xqueue_callback',
name='xqueue_callback',
),
# TODO: These views need to be updated before they work
url(r'^calculate$', 'util.views.calculate'),
url(r'^courses/?$', 'branding.views.courses', name="courses"),
#About the course
url(
r'^courses/{}/about$'.format(
settings.COURSE_ID_PATTERN,
),
'courseware.views.views.course_about',
name='about_course',
),
url(
r'^courses/{}/enroll_staff$'.format(
settings.COURSE_ID_PATTERN,
),
EnrollStaffView.as_view(),
name='enroll_staff',
),
#Inside the course
url(
r'^courses/{}/$'.format(
settings.COURSE_ID_PATTERN,
),
'courseware.views.views.course_info',
name='course_root',
),
url(
r'^courses/{}/info$'.format(
settings.COURSE_ID_PATTERN,
),
'courseware.views.views.course_info',
name='info',
),
# TODO arjun remove when custom tabs in place, see courseware/courses.py
url(
r'^courses/{}/syllabus$'.format(
settings.COURSE_ID_PATTERN,
),
'courseware.views.views.syllabus',
name='syllabus',
),
# Survey associated with a course
url(
r'^courses/{}/survey$'.format(
settings.COURSE_ID_PATTERN,
),
'courseware.views.views.course_survey',
name='course_survey',
),
url(
r'^courses/{}/book/(?P<book_index>\d+)/$'.format(
settings.COURSE_ID_PATTERN,
),
'staticbook.views.index',
name='book',
),
url(
r'^courses/{}/book/(?P<book_index>\d+)/(?P<page>\d+)$'.format(
settings.COURSE_ID_PATTERN,
),
'staticbook.views.index',
name='book',
),
url(
r'^courses/{}/pdfbook/(?P<book_index>\d+)/$'.format(
settings.COURSE_ID_PATTERN,
),
'staticbook.views.pdf_index',
name='pdf_book',
),
url(
r'^courses/{}/pdfbook/(?P<book_index>\d+)/(?P<page>\d+)$'.format(
settings.COURSE_ID_PATTERN,
),
'staticbook.views.pdf_index',
name='pdf_book',
),
url(
r'^courses/{}/pdfbook/(?P<book_index>\d+)/chapter/(?P<chapter>\d+)/$'.format(
settings.COURSE_ID_PATTERN,
),
'staticbook.views.pdf_index',
name='pdf_book',
),
url(
r'^courses/{}/pdfbook/(?P<book_index>\d+)/chapter/(?P<chapter>\d+)/(?P<page>\d+)$'.format(
settings.COURSE_ID_PATTERN,
),
'staticbook.views.pdf_index',
name='pdf_book',
),
url(
r'^courses/{}/htmlbook/(?P<book_index>\d+)/$'.format(
settings.COURSE_ID_PATTERN,
),
'staticbook.views.html_index',
name='html_book',
),
url(
r'^courses/{}/htmlbook/(?P<book_index>\d+)/chapter/(?P<chapter>\d+)/$'.format(
settings.COURSE_ID_PATTERN,
),
'staticbook.views.html_index',
name='html_book',
),
url(
r'^courses/{}/courseware/?$'.format(
settings.COURSE_ID_PATTERN,
),
CoursewareIndex.as_view(),
name='courseware',
),
url(
r'^courses/{}/courseware/(?P<chapter>[^/]*)/$'.format(
settings.COURSE_ID_PATTERN,
),
CoursewareIndex.as_view(),
name='courseware_chapter',
),
url(
r'^courses/{}/courseware/(?P<chapter>[^/]*)/(?P<section>[^/]*)/$'.format(
settings.COURSE_ID_PATTERN,
),
CoursewareIndex.as_view(),
name='courseware_section',
),
url(
r'^courses/{}/courseware/(?P<chapter>[^/]*)/(?P<section>[^/]*)/(?P<position>[^/]*)/?$'.format(
settings.COURSE_ID_PATTERN,
),
CoursewareIndex.as_view(),
name='courseware_position',
),
# progress page
url(
r'^courses/{}/progress$'.format(
settings.COURSE_ID_PATTERN,
),
'courseware.views.views.progress',
name='progress',
),
# Takes optional student_id for instructor use--shows profile as that student sees it.
url(
r'^courses/{}/progress/(?P<student_id>[^/]*)/$'.format(
settings.COURSE_ID_PATTERN,
),
'courseware.views.views.progress',
name='student_progress',
),
url(
r'^programs/{}/about'.format(
r'(?P<program_uuid>[0-9a-f-]+)',
),
'courseware.views.views.program_marketing',
name='program_marketing_view',
),
# rest api for grades
url(
r'^api/grades/',
include('lms.djangoapps.grades.api.urls', namespace='grades_api')
),
# For the instructor
url(
r'^courses/{}/instructor$'.format(
settings.COURSE_ID_PATTERN,
),
'lms.djangoapps.instructor.views.instructor_dashboard.instructor_dashboard_2',
name='instructor_dashboard',
),
url(
r'^courses/{}/set_course_mode_price$'.format(
settings.COURSE_ID_PATTERN,
),
'lms.djangoapps.instructor.views.instructor_dashboard.set_course_mode_price',
name='set_course_mode_price',
),
url(
r'^courses/{}/instructor/api/'.format(
settings.COURSE_ID_PATTERN,
),
include('lms.djangoapps.instructor.views.api_urls')),
url(
r'^courses/{}/remove_coupon$'.format(
settings.COURSE_ID_PATTERN,
),
'lms.djangoapps.instructor.views.coupons.remove_coupon',
name='remove_coupon',
),
url(
r'^courses/{}/add_coupon$'.format(
settings.COURSE_ID_PATTERN,
),
'lms.djangoapps.instructor.views.coupons.add_coupon',
name='add_coupon',
),
url(
r'^courses/{}/update_coupon$'.format(
settings.COURSE_ID_PATTERN,
),
'lms.djangoapps.instructor.views.coupons.update_coupon',
name='update_coupon',
),
url(
r'^courses/{}/get_coupon_info$'.format(
settings.COURSE_ID_PATTERN,
),
'lms.djangoapps.instructor.views.coupons.get_coupon_info',
name='get_coupon_info',
),
url(
r'^courses/{}/'.format(
settings.COURSE_ID_PATTERN,
),
include(COURSE_URLS)
),
# Cohorts management
url(
r'^courses/{}/cohorts/settings$'.format(
settings.COURSE_KEY_PATTERN,
),
'openedx.core.djangoapps.course_groups.views.course_cohort_settings_handler',
name='course_cohort_settings',
),
url(
r'^courses/{}/cohorts/(?P<cohort_id>[0-9]+)?$'.format(
settings.COURSE_KEY_PATTERN,
),
'openedx.core.djangoapps.course_groups.views.cohort_handler',
name='cohorts',
),
url(
r'^courses/{}/cohorts/(?P<cohort_id>[0-9]+)$'.format(
settings.COURSE_KEY_PATTERN,
),
'openedx.core.djangoapps.course_groups.views.users_in_cohort',
name='list_cohort',
),
url(
r'^courses/{}/cohorts/(?P<cohort_id>[0-9]+)/add$'.format(
settings.COURSE_KEY_PATTERN,
),
'openedx.core.djangoapps.course_groups.views.add_users_to_cohort',
name='add_to_cohort',
),
url(
r'^courses/{}/cohorts/(?P<cohort_id>[0-9]+)/delete$'.format(
settings.COURSE_KEY_PATTERN,
),
'openedx.core.djangoapps.course_groups.views.remove_user_from_cohort',
name='remove_from_cohort',
),
url(
r'^courses/{}/cohorts/debug$'.format(
settings.COURSE_KEY_PATTERN,
),
'openedx.core.djangoapps.course_groups.views.debug_cohort_mgmt',
name='debug_cohort_mgmt',
),
url(
r'^courses/{}/cohorts/topics$'.format(
settings.COURSE_KEY_PATTERN,
),
'openedx.core.djangoapps.course_groups.views.cohort_discussion_topics',
name='cohort_discussion_topics',
),
url(
r'^courses/{}/verified_track_content/settings'.format(
settings.COURSE_KEY_PATTERN,
),
'openedx.core.djangoapps.verified_track_content.views.cohorting_settings',
name='verified_track_cohorting',
),
url(
r'^courses/{}/notes$'.format(
settings.COURSE_ID_PATTERN,
),
'notes.views.notes',
name='notes',
),
url(
r'^courses/{}/notes/'.format(
settings.COURSE_ID_PATTERN,
),
include('notes.urls')
),
# LTI endpoints listing
url(
r'^courses/{}/lti_rest_endpoints/'.format(
settings.COURSE_ID_PATTERN,
),
'courseware.views.views.get_course_lti_endpoints',
name='lti_rest_endpoints',
),
# Student account
url(
r'^account/',
include('student_account.urls')
),
# Student profile
url(
r'^u/(?P<username>[\w.@+-]+)$',
'student_profile.views.learner_profile',
name='learner_profile',
),
# Student Notes
url(
r'^courses/{}/edxnotes'.format(
settings.COURSE_ID_PATTERN,
),
include('edxnotes.urls'),
name='edxnotes_endpoints',
),
# Branding API
url(
r'^api/branding/v1/',
include('branding.api_urls')
),
# Course experience
url(
r'^courses/{}/course/'.format(
settings.COURSE_ID_PATTERN,
),
include('openedx.features.course_experience.urls'),
),
# Course bookmarks
url(
r'^courses/{}/bookmarks/'.format(
settings.COURSE_ID_PATTERN,
),
include('openedx.features.course_bookmarks.urls'),
),
)
if settings.FEATURES["ENABLE_TEAMS"]:
# Teams endpoints
urlpatterns += (
url(
r'^api/team/',
include('lms.djangoapps.teams.api_urls')
),
url(
r'^courses/{}/teams'.format(
settings.COURSE_ID_PATTERN,
),
include('lms.djangoapps.teams.urls'),
name='teams_endpoints',
),
)
# allow course staff to change to student view of courseware
if settings.FEATURES.get('ENABLE_MASQUERADE'):
urlpatterns += (
url(
r'^courses/{}/masquerade$'.format(
settings.COURSE_KEY_PATTERN,
),
'courseware.masquerade.handle_ajax',
name='masquerade_update',
),
)
urlpatterns += (
url(
r'^courses/{}/generate_user_cert'.format(
settings.COURSE_ID_PATTERN,
),
'courseware.views.views.generate_user_cert',
name='generate_user_cert',
),
)
# discussion forums live within courseware, so courseware must be enabled first
if settings.FEATURES.get('ENABLE_DISCUSSION_SERVICE'):
urlpatterns += (
url(
r'^api/discussion/',
include('discussion_api.urls')
),
url(
r'^courses/{}/discussion/'.format(
settings.COURSE_ID_PATTERN,
),
include('django_comment_client.urls')
),
url(
r'^courses/{}/discussion/forum/'.format(
settings.COURSE_ID_PATTERN,
),
include('discussion.urls')
),
url(
r'^notification_prefs/enable/',
'notification_prefs.views.ajax_enable'
),
url(
r'^notification_prefs/disable/',
'notification_prefs.views.ajax_disable'
),
url(
r'^notification_prefs/status/',
'notification_prefs.views.ajax_status'
),
url(
r'^notification_prefs/unsubscribe/(?P<token>[a-zA-Z0-9-_=]+)/',
'notification_prefs.views.set_subscription',
{
'subscribe': False,
},
name='unsubscribe_forum_update',
),
url(
r'^notification_prefs/resubscribe/(?P<token>[a-zA-Z0-9-_=]+)/',
'notification_prefs.views.set_subscription',
{
'subscribe': True,
},
name='resubscribe_forum_update',
),
)
urlpatterns += (
url(
r'^courses/{}/tab/(?P<tab_type>[^/]+)/$'.format(
settings.COURSE_ID_PATTERN,
),
CourseTabView.as_view(),
name='course_tab_view',
),
)
urlpatterns += (
# This MUST be the last view in the courseware--it's a catch-all for custom tabs.
url(
r'^courses/{}/(?P<tab_slug>[^/]+)/$'.format(
settings.COURSE_ID_PATTERN,
),
StaticCourseTabView.as_view(),
name='static_tab',
),
)
if settings.FEATURES.get('ENABLE_STUDENT_HISTORY_VIEW'):
urlpatterns += (
url(
r'^courses/{}/submission_history/(?P<student_username>[^/]*)/(?P<location>.*?)$'.format(
settings.COURSE_ID_PATTERN
),
'courseware.views.views.submission_history',
name='submission_history',
),
)
if settings.FEATURES.get('CLASS_DASHBOARD'):
urlpatterns += (
url(r'^class_dashboard/', include('class_dashboard.urls')),
)
if settings.DEBUG or settings.FEATURES.get('ENABLE_DJANGO_ADMIN_SITE'):
## Jasmine and admin
urlpatterns += (url(r'^admin/', include(admin.site.urls)),)
if settings.FEATURES.get('AUTH_USE_OPENID'):
urlpatterns += (
url(r'^openid/login/$', 'django_openid_auth.views.login_begin', name='openid-login'),
url(
r'^openid/complete/$',
'openedx.core.djangoapps.external_auth.views.openid_login_complete',
name='openid-complete',
),
url(r'^openid/logo.gif$', 'django_openid_auth.views.logo', name='openid-logo'),
)
if settings.FEATURES.get('AUTH_USE_SHIB'):
urlpatterns += (
url(r'^shib-login/$', 'openedx.core.djangoapps.external_auth.views.shib_login', name='shib-login'),
)
if settings.FEATURES.get('AUTH_USE_CAS'):
urlpatterns += (
url(r'^cas-auth/login/$', 'openedx.core.djangoapps.external_auth.views.cas_login', name="cas-login"),
url(r'^cas-auth/logout/$', 'django_cas.views.logout', {'next_page': '/'}, name="cas-logout"),
)
if settings.FEATURES.get('RESTRICT_ENROLL_BY_REG_METHOD'):
urlpatterns += (
url(r'^course_specific_login/{}/$'.format(settings.COURSE_ID_PATTERN),
'openedx.core.djangoapps.external_auth.views.course_specific_login', name='course-specific-login'),
url(r'^course_specific_register/{}/$'.format(settings.COURSE_ID_PATTERN),
'openedx.core.djangoapps.external_auth.views.course_specific_register', name='course-specific-register'),
)
# Shopping cart
urlpatterns += (
url(r'^shoppingcart/', include('shoppingcart.urls')),
url(r'^commerce/', include('commerce.urls', namespace='commerce')),
)
# Embargo
if settings.FEATURES.get('EMBARGO'):
urlpatterns += (
url(r'^embargo/', include('openedx.core.djangoapps.embargo.urls')),
)
# Survey Djangoapp
urlpatterns += (
url(r'^survey/', include('survey.urls')),
)
if settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'):
urlpatterns += (
url(
r'^openid/provider/login/$',
'openedx.core.djangoapps.external_auth.views.provider_login',
name='openid-provider-login',
),
url(
r'^openid/provider/login/(?:.+)$',
'openedx.core.djangoapps.external_auth.views.provider_identity',
name='openid-provider-login-identity'
),
url(
r'^openid/provider/identity/$',
'openedx.core.djangoapps.external_auth.views.provider_identity',
name='openid-provider-identity',
),
url(
r'^openid/provider/xrds/$',
'openedx.core.djangoapps.external_auth.views.provider_xrds',
name='openid-provider-xrds',
),
)
if settings.FEATURES.get('ENABLE_OAUTH2_PROVIDER'):
urlpatterns += (
# These URLs dispatch to django-oauth-toolkit or django-oauth2-provider as appropriate.
# Developers should use these routes, to maintain compatibility for existing client code
url(r'^oauth2/', include('openedx.core.djangoapps.oauth_dispatch.urls')),
# These URLs contain the django-oauth2-provider default behavior. It exists to provide
# URLs for django-oauth2-provider to call using reverse() with the oauth2 namespace, and
# also to maintain support for views that have not yet been wrapped in dispatch views.
url(r'^oauth2/', include('edx_oauth2_provider.urls', namespace='oauth2')),
# The /_o/ prefix exists to provide a target for code in django-oauth-toolkit that
# uses reverse() with the 'oauth2_provider' namespace. Developers should not access these
# views directly, but should rather use the wrapped views at /oauth2/
url(r'^_o/', include('oauth2_provider.urls', namespace='oauth2_provider')),
)
if settings.FEATURES.get('ENABLE_LMS_MIGRATION'):
urlpatterns += (
url(r'^migrate/modules$', 'lms_migration.migrate.manage_modulestores'),
url(r'^migrate/reload/(?P<reload_dir>[^/]+)$', 'lms_migration.migrate.manage_modulestores'),
url(
r'^migrate/reload/(?P<reload_dir>[^/]+)/(?P<commit_id>[^/]+)$',
'lms_migration.migrate.manage_modulestores'
),
url(r'^gitreload$', 'lms_migration.migrate.gitreload'),
url(r'^gitreload/(?P<reload_dir>[^/]+)$', 'lms_migration.migrate.gitreload'),
)
if settings.FEATURES.get('ENABLE_SQL_TRACKING_LOGS'):
urlpatterns += (
url(r'^event_logs$', 'track.views.view_tracking_log'),
url(r'^event_logs/(?P<args>.+)$', 'track.views.view_tracking_log'),
)
if settings.FEATURES.get('ENABLE_SERVICE_STATUS'):
urlpatterns += (
url(r'^status/', include('openedx.core.djangoapps.service_status.urls')),
)
if settings.FEATURES.get('ENABLE_INSTRUCTOR_BACKGROUND_TASKS'):
urlpatterns += (
url(
r'^instructor_task_status/$',
'lms.djangoapps.instructor_task.views.instructor_task_status',
name='instructor_task_status'
),
)
if settings.FEATURES.get('RUN_AS_ANALYTICS_SERVER_ENABLED'):
urlpatterns += (
url(r'^edinsights_service/', include('edinsights.core.urls')),
)
if settings.FEATURES.get('ENABLE_DEBUG_RUN_PYTHON'):
urlpatterns += (
url(r'^debug/run_python$', 'debug.views.run_python'),
)
urlpatterns += (
url(r'^debug/show_parameters$', 'debug.views.show_parameters'),
)
# Third-party auth.
if settings.FEATURES.get('ENABLE_THIRD_PARTY_AUTH'):
urlpatterns += (
url(r'', include('third_party_auth.urls')),
url(r'api/third_party_auth/', include('third_party_auth.api.urls')),
# NOTE: The following login_oauth_token endpoint is DEPRECATED.
# Please use the exchange_access_token endpoint instead.
url(r'^login_oauth_token/(?P<backend>[^/]+)/$', 'student.views.login_oauth_token'),
)
# Enterprise
if enterprise_enabled():
urlpatterns += (
url(r'', include('enterprise.urls')),
)
# OAuth token exchange
if settings.FEATURES.get('ENABLE_OAUTH2_PROVIDER'):
urlpatterns += (
url(
r'^oauth2/login/$',
LoginWithAccessTokenView.as_view(),
name="login_with_access_token"
),
)
# Certificates
urlpatterns += (
url(r'^certificates/', include('certificates.urls', app_name="certificates", namespace="certificates")),
# Backwards compatibility with XQueue, which uses URLs that are not prefixed with /certificates/
url(r'^update_certificate$', 'certificates.views.update_certificate'),
url(r'^update_example_certificate$', 'certificates.views.update_example_certificate'),
url(r'^request_certificate$', 'certificates.views.request_certificate'),
# REST APIs
url(r'^api/certificates/',
include('lms.djangoapps.certificates.apis.urls', namespace='certificates_api')),
)
# XDomain proxy
urlpatterns += (
url(r'^xdomain_proxy.html$', 'openedx.core.djangoapps.cors_csrf.views.xdomain_proxy', name='xdomain_proxy'),
)
# Custom courses on edX (CCX) URLs
if settings.FEATURES["CUSTOM_COURSES_EDX"]:
urlpatterns += (
url(r'^courses/{}/'.format(settings.COURSE_ID_PATTERN),
include('ccx.urls')),
url(r'^api/ccx/', include('lms.djangoapps.ccx.api.urls', namespace='ccx_api')),
)
# Access to courseware as an LTI provider
if settings.FEATURES.get("ENABLE_LTI_PROVIDER"):
urlpatterns += (
url(r'^lti_provider/', include('lti_provider.urls')),
)
urlpatterns += (
url(r'config/self_paced', ConfigurationModelCurrentAPIView.as_view(model=SelfPacedConfiguration)),
url(r'config/programs', ConfigurationModelCurrentAPIView.as_view(model=ProgramsApiConfig)),
url(r'config/catalog', ConfigurationModelCurrentAPIView.as_view(model=CatalogIntegration)),
url(r'config/forums', ConfigurationModelCurrentAPIView.as_view(model=ForumsConfig)),
)
urlpatterns = patterns(*urlpatterns)
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(
settings.PROFILE_IMAGE_BACKEND['options']['base_url'],
document_root=settings.PROFILE_IMAGE_BACKEND['options']['location']
)
urlpatterns += url(r'^template/(?P<template>.+)$', 'openedx.core.djangoapps.debug.views.show_reference_template'),
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns += (
url(r'^__debug__/', include(debug_toolbar.urls)),
)
# Custom error pages
# These are used by Django to render these error codes. Do not remove.
# pylint: disable=invalid-name
handler404 = 'static_template_view.views.render_404'
handler500 = 'static_template_view.views.render_500'
# include into our URL patterns the HTTP REST API that comes with edx-proctoring.
urlpatterns += (
url(r'^api/', include('edx_proctoring.urls')),
)
if settings.FEATURES.get('ENABLE_FINANCIAL_ASSISTANCE_FORM'):
urlpatterns += (
url(
r'^financial-assistance/$',
'courseware.views.views.financial_assistance',
name='financial_assistance'
),
url(
r'^financial-assistance/apply/$',
'courseware.views.views.financial_assistance_form',
name='financial_assistance_form'
),
url(
r'^financial-assistance/submit/$',
'courseware.views.views.financial_assistance_request',
name='submit_financial_assistance_request'
)
)
| agpl-3.0 |
dawnpower/nova | nova/api/openstack/compute/plugins/v3/access_ips.py | 33 | 3886 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack.compute.schemas.v3 import access_ips
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
ALIAS = "os-access-ips"
authorize = extensions.os_compute_soft_authorizer(ALIAS)
class AccessIPsController(wsgi.Controller):
    """Controller extension that adds access IP fields to server payloads."""

    # Maps each API response key to the DB instance attribute it mirrors.
    _FIELDS = (('accessIPv4', 'access_ip_v4'),
               ('accessIPv6', 'access_ip_v6'))

    def _extend_server(self, req, server):
        """Attach accessIPv4/accessIPv6 to ``server``.

        Unset (None) addresses are rendered as empty strings, matching
        the v2 API contract.
        """
        db_instance = req.get_db_instance(server['id'])
        for response_key, attr in self._FIELDS:
            value = db_instance.get(attr)
            server[response_key] = '' if value is None else str(value)

    @wsgi.extends
    def show(self, req, resp_obj, id):
        if authorize(req.environ['nova.context']):
            self._extend_server(req, resp_obj.obj['server'])

    @wsgi.extends
    def update(self, req, resp_obj, id, body):
        if authorize(req.environ['nova.context']):
            self._extend_server(req, resp_obj.obj['server'])

    @wsgi.extends(action='rebuild')
    def rebuild(self, req, resp_obj, id, body):
        if authorize(req.environ['nova.context']):
            self._extend_server(req, resp_obj.obj['server'])

    @wsgi.extends
    def detail(self, req, resp_obj):
        if authorize(req.environ['nova.context']):
            for server in resp_obj.obj['servers']:
                self._extend_server(req, server)
class AccessIPs(extensions.V3APIExtensionBase):
    """Access IPs support."""

    name = "AccessIPs"
    alias = ALIAS
    version = 1

    v4_key = 'accessIPv4'
    v6_key = 'accessIPv6'

    def get_controller_extensions(self):
        """Hook AccessIPsController into the 'servers' resource."""
        return [extensions.ControllerExtension(self, 'servers',
                                               AccessIPsController())]

    def get_resources(self):
        # This extension adds no standalone resources.
        return []

    # NOTE(gmann): This function is not supposed to use the
    # 'body_deprecated_param' parameter as this is placed to handle the
    # scheduler_hint extension for V2.1. It is kept optional so the same
    # implementation can also back server_update & server_rebuild.
    def server_create(self, server_dict, create_kwargs,
                      body_deprecated_param=None):
        for request_key, kwarg_name in ((AccessIPs.v4_key, 'access_ip_v4'),
                                        (AccessIPs.v6_key, 'access_ip_v6')):
            if request_key in server_dict:
                # Falsy values (e.g. empty string) are normalized to None.
                create_kwargs[kwarg_name] = server_dict[request_key] or None

    server_update = server_create
    server_rebuild = server_create

    def get_server_create_schema(self):
        return access_ips.server_create

    get_server_update_schema = get_server_create_schema
    get_server_rebuild_schema = get_server_create_schema
| apache-2.0 |
MRigal/django | tests/model_formsets/models.py | 49 | 7480 | from __future__ import unicode_literals
import datetime
import uuid
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Author(models.Model):
    # Minimal model used as the "parent" side of most formset tests.
    name = models.CharField(max_length=100)
    class Meta:
        # Deterministic ordering so formset initial data is stable.
        ordering = ('name',)
    def __str__(self):
        return self.name
class BetterAuthor(Author):
write_speed = models.IntegerField()
@python_2_unicode_compatible
class Book(models.Model):
    # Books are unique per (author, title); used to exercise
    # unique_together validation in model formsets.
    author = models.ForeignKey(Author)
    title = models.CharField(max_length=100)
    class Meta:
        unique_together = (
            ('author', 'title'),
        )
        # Stable ordering for formset queryset assertions.
        ordering = ['id']
    def __str__(self):
        return self.title
@python_2_unicode_compatible
class BookWithCustomPK(models.Model):
my_pk = models.DecimalField(max_digits=5, decimal_places=0, primary_key=True)
author = models.ForeignKey(Author)
title = models.CharField(max_length=100)
def __str__(self):
return '%s: %s' % (self.my_pk, self.title)
class Editor(models.Model):
name = models.CharField(max_length=100)
@python_2_unicode_compatible
class BookWithOptionalAltEditor(models.Model):
author = models.ForeignKey(Author)
# Optional secondary author
alt_editor = models.ForeignKey(Editor, blank=True, null=True)
title = models.CharField(max_length=100)
class Meta:
unique_together = (
('author', 'title', 'alt_editor'),
)
def __str__(self):
return self.title
@python_2_unicode_compatible
class AlternateBook(Book):
notes = models.CharField(max_length=100)
def __str__(self):
return '%s - %s' % (self.title, self.notes)
@python_2_unicode_compatible
class AuthorMeeting(models.Model):
name = models.CharField(max_length=100)
authors = models.ManyToManyField(Author)
created = models.DateField(editable=False)
def __str__(self):
return self.name
class CustomPrimaryKey(models.Model):
my_pk = models.CharField(max_length=10, primary_key=True)
some_field = models.CharField(max_length=100)
# models for inheritance tests.
@python_2_unicode_compatible
class Place(models.Model):
name = models.CharField(max_length=50)
city = models.CharField(max_length=50)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Owner(models.Model):
auto_id = models.AutoField(primary_key=True)
name = models.CharField(max_length=100)
place = models.ForeignKey(Place)
def __str__(self):
return "%s at %s" % (self.name, self.place)
class Location(models.Model):
place = models.ForeignKey(Place, unique=True)
# this is purely for testing the data doesn't matter here :)
lat = models.CharField(max_length=100)
lon = models.CharField(max_length=100)
@python_2_unicode_compatible
class OwnerProfile(models.Model):
owner = models.OneToOneField(Owner, primary_key=True)
age = models.PositiveIntegerField()
def __str__(self):
return "%s is %d" % (self.owner.name, self.age)
@python_2_unicode_compatible
class Restaurant(Place):
serves_pizza = models.BooleanField(default=False)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Product(models.Model):
slug = models.SlugField(unique=True)
def __str__(self):
return self.slug
@python_2_unicode_compatible
class Price(models.Model):
    price = models.DecimalField(max_digits=10, decimal_places=2)
    quantity = models.PositiveIntegerField()
    def __str__(self):
        return "%s for %s" % (self.quantity, self.price)
    class Meta:
        # Exercises unique_together validation without any FK involved.
        unique_together = (('price', 'quantity'),)
class MexicanRestaurant(Restaurant):
serves_tacos = models.BooleanField(default=False)
class ClassyMexicanRestaurant(MexicanRestaurant):
restaurant = models.OneToOneField(MexicanRestaurant, parent_link=True, primary_key=True)
tacos_are_yummy = models.BooleanField(default=False)
# models for testing unique_together validation when a fk is involved and
# using inlineformset_factory.
@python_2_unicode_compatible
class Repository(models.Model):
name = models.CharField(max_length=25)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Revision(models.Model):
    repository = models.ForeignKey(Repository)
    revision = models.CharField(max_length=40)
    class Meta:
        # unique_together involving a FK; exercised via inlineformset_factory.
        unique_together = (("repository", "revision"),)
    def __str__(self):
        # six.text_type keeps the repr correct on both Python 2 and 3.
        return "%s (%s)" % (self.revision, six.text_type(self.repository))
# models for testing callable defaults (see bug #7975). If you define a model
# with a callable default value, you cannot rely on the initial value in a
# form.
class Person(models.Model):
name = models.CharField(max_length=128)
class Membership(models.Model):
person = models.ForeignKey(Person)
date_joined = models.DateTimeField(default=datetime.datetime.now)
karma = models.IntegerField()
# models for testing a null=True fk to a parent
class Team(models.Model):
name = models.CharField(max_length=100)
@python_2_unicode_compatible
class Player(models.Model):
team = models.ForeignKey(Team, null=True)
name = models.CharField(max_length=100)
def __str__(self):
return self.name
# Models for testing custom ModelForm save methods in formsets and inline formsets
@python_2_unicode_compatible
class Poet(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Poem(models.Model):
poet = models.ForeignKey(Poet)
name = models.CharField(max_length=100)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Post(models.Model):
    """Model exercising unique_for_date/month/year field constraints."""
    title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
    slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
    subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
    posted = models.DateField()
    def __str__(self):
        # Bug fix: this model has no ``name`` field, so the previous
        # ``return self.name`` raised AttributeError whenever a Post was
        # stringified. ``title`` is the natural display field.
        return self.title
# Models for testing UUID primary keys
class UUIDPKParent(models.Model):
uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=255)
class UUIDPKChild(models.Model):
uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=255)
parent = models.ForeignKey(UUIDPKParent)
class ChildWithEditablePK(models.Model):
name = models.CharField(max_length=255, primary_key=True)
parent = models.ForeignKey(UUIDPKParent)
class AutoPKChildOfUUIDPKParent(models.Model):
name = models.CharField(max_length=255)
parent = models.ForeignKey(UUIDPKParent)
class AutoPKParent(models.Model):
name = models.CharField(max_length=255)
class UUIDPKChildOfAutoPKParent(models.Model):
uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=255)
parent = models.ForeignKey(AutoPKParent)
class ParentWithUUIDAlternateKey(models.Model):
uuid = models.UUIDField(unique=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=50)
class ChildRelatedViaAK(models.Model):
name = models.CharField(max_length=255)
parent = models.ForeignKey(to=ParentWithUUIDAlternateKey, to_field='uuid')
| bsd-3-clause |
bcorbet/SickRage | lib/unidecode/x09b.py | 252 | 4655 | data = (
'Ti ', # 0x00
'Li ', # 0x01
'Bin ', # 0x02
'Zong ', # 0x03
'Ti ', # 0x04
'Peng ', # 0x05
'Song ', # 0x06
'Zheng ', # 0x07
'Quan ', # 0x08
'Zong ', # 0x09
'Shun ', # 0x0a
'Jian ', # 0x0b
'Duo ', # 0x0c
'Hu ', # 0x0d
'La ', # 0x0e
'Jiu ', # 0x0f
'Qi ', # 0x10
'Lian ', # 0x11
'Zhen ', # 0x12
'Bin ', # 0x13
'Peng ', # 0x14
'Mo ', # 0x15
'San ', # 0x16
'Man ', # 0x17
'Man ', # 0x18
'Seng ', # 0x19
'Xu ', # 0x1a
'Lie ', # 0x1b
'Qian ', # 0x1c
'Qian ', # 0x1d
'Nong ', # 0x1e
'Huan ', # 0x1f
'Kuai ', # 0x20
'Ning ', # 0x21
'Bin ', # 0x22
'Lie ', # 0x23
'Rang ', # 0x24
'Dou ', # 0x25
'Dou ', # 0x26
'Nao ', # 0x27
'Hong ', # 0x28
'Xi ', # 0x29
'Dou ', # 0x2a
'Han ', # 0x2b
'Dou ', # 0x2c
'Dou ', # 0x2d
'Jiu ', # 0x2e
'Chang ', # 0x2f
'Yu ', # 0x30
'Yu ', # 0x31
'Li ', # 0x32
'Juan ', # 0x33
'Fu ', # 0x34
'Qian ', # 0x35
'Gui ', # 0x36
'Zong ', # 0x37
'Liu ', # 0x38
'Gui ', # 0x39
'Shang ', # 0x3a
'Yu ', # 0x3b
'Gui ', # 0x3c
'Mei ', # 0x3d
'Ji ', # 0x3e
'Qi ', # 0x3f
'Jie ', # 0x40
'Kui ', # 0x41
'Hun ', # 0x42
'Ba ', # 0x43
'Po ', # 0x44
'Mei ', # 0x45
'Xu ', # 0x46
'Yan ', # 0x47
'Xiao ', # 0x48
'Liang ', # 0x49
'Yu ', # 0x4a
'Tui ', # 0x4b
'Qi ', # 0x4c
'Wang ', # 0x4d
'Liang ', # 0x4e
'Wei ', # 0x4f
'Jian ', # 0x50
'Chi ', # 0x51
'Piao ', # 0x52
'Bi ', # 0x53
'Mo ', # 0x54
'Ji ', # 0x55
'Xu ', # 0x56
'Chou ', # 0x57
'Yan ', # 0x58
'Zhan ', # 0x59
'Yu ', # 0x5a
'Dao ', # 0x5b
'Ren ', # 0x5c
'Ji ', # 0x5d
'Eri ', # 0x5e
'Gong ', # 0x5f
'Tuo ', # 0x60
'Diao ', # 0x61
'Ji ', # 0x62
'Xu ', # 0x63
'E ', # 0x64
'E ', # 0x65
'Sha ', # 0x66
'Hang ', # 0x67
'Tun ', # 0x68
'Mo ', # 0x69
'Jie ', # 0x6a
'Shen ', # 0x6b
'Fan ', # 0x6c
'Yuan ', # 0x6d
'Bi ', # 0x6e
'Lu ', # 0x6f
'Wen ', # 0x70
'Hu ', # 0x71
'Lu ', # 0x72
'Za ', # 0x73
'Fang ', # 0x74
'Fen ', # 0x75
'Na ', # 0x76
'You ', # 0x77
'Namazu ', # 0x78
'Todo ', # 0x79
'He ', # 0x7a
'Xia ', # 0x7b
'Qu ', # 0x7c
'Han ', # 0x7d
'Pi ', # 0x7e
'Ling ', # 0x7f
'Tuo ', # 0x80
'Bo ', # 0x81
'Qiu ', # 0x82
'Ping ', # 0x83
'Fu ', # 0x84
'Bi ', # 0x85
'Ji ', # 0x86
'Wei ', # 0x87
'Ju ', # 0x88
'Diao ', # 0x89
'Bo ', # 0x8a
'You ', # 0x8b
'Gun ', # 0x8c
'Pi ', # 0x8d
'Nian ', # 0x8e
'Xing ', # 0x8f
'Tai ', # 0x90
'Bao ', # 0x91
'Fu ', # 0x92
'Zha ', # 0x93
'Ju ', # 0x94
'Gu ', # 0x95
'Kajika ', # 0x96
'Tong ', # 0x97
'[?] ', # 0x98
'Ta ', # 0x99
'Jie ', # 0x9a
'Shu ', # 0x9b
'Hou ', # 0x9c
'Xiang ', # 0x9d
'Er ', # 0x9e
'An ', # 0x9f
'Wei ', # 0xa0
'Tiao ', # 0xa1
'Zhu ', # 0xa2
'Yin ', # 0xa3
'Lie ', # 0xa4
'Luo ', # 0xa5
'Tong ', # 0xa6
'Yi ', # 0xa7
'Qi ', # 0xa8
'Bing ', # 0xa9
'Wei ', # 0xaa
'Jiao ', # 0xab
'Bu ', # 0xac
'Gui ', # 0xad
'Xian ', # 0xae
'Ge ', # 0xaf
'Hui ', # 0xb0
'Bora ', # 0xb1
'Mate ', # 0xb2
'Kao ', # 0xb3
'Gori ', # 0xb4
'Duo ', # 0xb5
'Jun ', # 0xb6
'Ti ', # 0xb7
'Man ', # 0xb8
'Xiao ', # 0xb9
'Za ', # 0xba
'Sha ', # 0xbb
'Qin ', # 0xbc
'Yu ', # 0xbd
'Nei ', # 0xbe
'Zhe ', # 0xbf
'Gun ', # 0xc0
'Geng ', # 0xc1
'Su ', # 0xc2
'Wu ', # 0xc3
'Qiu ', # 0xc4
'Ting ', # 0xc5
'Fu ', # 0xc6
'Wan ', # 0xc7
'You ', # 0xc8
'Li ', # 0xc9
'Sha ', # 0xca
'Sha ', # 0xcb
'Gao ', # 0xcc
'Meng ', # 0xcd
'Ugui ', # 0xce
'Asari ', # 0xcf
'Subashiri ', # 0xd0
'Kazunoko ', # 0xd1
'Yong ', # 0xd2
'Ni ', # 0xd3
'Zi ', # 0xd4
'Qi ', # 0xd5
'Qing ', # 0xd6
'Xiang ', # 0xd7
'Nei ', # 0xd8
'Chun ', # 0xd9
'Ji ', # 0xda
'Diao ', # 0xdb
'Qie ', # 0xdc
'Gu ', # 0xdd
'Zhou ', # 0xde
'Dong ', # 0xdf
'Lai ', # 0xe0
'Fei ', # 0xe1
'Ni ', # 0xe2
'Yi ', # 0xe3
'Kun ', # 0xe4
'Lu ', # 0xe5
'Jiu ', # 0xe6
'Chang ', # 0xe7
'Jing ', # 0xe8
'Lun ', # 0xe9
'Ling ', # 0xea
'Zou ', # 0xeb
'Li ', # 0xec
'Meng ', # 0xed
'Zong ', # 0xee
'Zhi ', # 0xef
'Nian ', # 0xf0
'Shachi ', # 0xf1
'Dojou ', # 0xf2
'Sukesou ', # 0xf3
'Shi ', # 0xf4
'Shen ', # 0xf5
'Hun ', # 0xf6
'Shi ', # 0xf7
'Hou ', # 0xf8
'Xing ', # 0xf9
'Zhu ', # 0xfa
'La ', # 0xfb
'Zong ', # 0xfc
'Ji ', # 0xfd
'Bian ', # 0xfe
'Bian ', # 0xff
)
| gpl-3.0 |
autvincere/bureau-veritas-food | node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/generator/make.py | 388 | 91069 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This is all roughly based on the Makefile system used by the Linux
# kernel, but is a non-recursive make -- we put the entire dependency
# graph in front of make and let it figure it out.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level Makefile. This means that all
# variables in .mk-files clobber one another. Be careful to use :=
# where appropriate for immediate evaluation, and similarly to watch
# that you're not relying on a variable value to last beween different
# .mk files.
#
# TODOs:
#
# Global settings and utility functions are currently stuffed in the
# toplevel Makefile. It may make sense to generate some .mk files on
# the side to keep the the files readable.
import os
import re
import sys
import subprocess
import gyp
import gyp.common
import gyp.xcode_emulation
from gyp.common import GetEnvironFallback
from gyp.common import GypError
# Default make-variable expansions handed to gyp's input processing.  Most
# values are deliberately make syntax ($(...)): they are expanded by make at
# build time, not by Python at generation time.
generator_default_variables = {
  'EXECUTABLE_PREFIX': '',
  'EXECUTABLE_SUFFIX': '',
  'STATIC_LIB_PREFIX': 'lib',
  'SHARED_LIB_PREFIX': 'lib',
  'STATIC_LIB_SUFFIX': '.a',
  'INTERMEDIATE_DIR': '$(obj).$(TOOLSET)/$(TARGET)/geni',
  'SHARED_INTERMEDIATE_DIR': '$(obj)/gen',
  'PRODUCT_DIR': '$(builddir)',
  'RULE_INPUT_ROOT': '%(INPUT_ROOT)s',  # This gets expanded by Python.
  'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s',  # This gets expanded by Python.
  'RULE_INPUT_PATH': '$(abspath $<)',
  'RULE_INPUT_EXT': '$(suffix $<)',
  'RULE_INPUT_NAME': '$(notdir $<)',
  'CONFIGURATION_NAME': '$(BUILDTYPE)',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True
# Request sorted dependencies in the order from dependents to dependencies.
# Flipped to True by CalculateGeneratorInputInfo() when targeting Android NDK.
generator_wants_sorted_dependencies = False
# Placates pylint.  CalculateVariables() overwrites these with Xcode-derived
# values when the flavor is 'mac'.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
# Populated by CalculateGeneratorInputInfo().
generator_filelist_paths = None
def CalculateVariables(default_variables, params):
  """Calculate additional variables for use in the build (called by gyp).

  Fills in platform-dependent defaults (shared-library suffix, output dirs)
  in |default_variables| and, on mac, pulls extra generator configuration
  from the Xcode generator as a side effect on module globals.
  """
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'mac':
    default_variables.setdefault('OS', 'mac')
    default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
    default_variables.setdefault('SHARED_LIB_DIR',
                                 generator_default_variables['PRODUCT_DIR'])
    default_variables.setdefault('LIB_DIR',
                                 generator_default_variables['PRODUCT_DIR'])
    # Copy additional generator configuration data from Xcode, which is shared
    # by the Mac Make generator.
    import gyp.generator.xcode as xcode_generator
    global generator_additional_non_configuration_keys
    generator_additional_non_configuration_keys = getattr(xcode_generator,
        'generator_additional_non_configuration_keys', [])
    global generator_additional_path_sections
    generator_additional_path_sections = getattr(xcode_generator,
        'generator_additional_path_sections', [])
    global generator_extra_sources_for_rules
    generator_extra_sources_for_rules = getattr(xcode_generator,
        'generator_extra_sources_for_rules', [])
    # Objective-C(++) sources are only compilable on mac.
    COMPILABLE_EXTENSIONS.update({'.m': 'objc', '.mm' : 'objcxx'})
  else:
    operating_system = flavor
    if flavor == 'android':
      operating_system = 'linux'  # Keep this legacy behavior for now.
    default_variables.setdefault('OS', operating_system)
    default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
    default_variables.setdefault('SHARED_LIB_DIR','$(builddir)/lib.$(TOOLSET)')
    default_variables.setdefault('LIB_DIR', '$(obj).$(TOOLSET)')
def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp).

  Side effects on module globals: may set generator_wants_sorted_dependencies
  and always sets generator_filelist_paths.
  """
  generator_flags = params.get('generator_flags', {})
  android_ndk_version = generator_flags.get('android_ndk_version', None)
  # Android NDK requires a strict link order.
  if android_ndk_version:
    global generator_wants_sorted_dependencies
    generator_wants_sorted_dependencies = True
  output_dir = params['options'].generator_output or \
      params['options'].toplevel_dir
  builddir_name = generator_flags.get('output_dir', 'out')
  # Where generated file lists are written, e.g. <out>/out/gypfiles.
  qualified_out_dir = os.path.normpath(os.path.join(
      output_dir, builddir_name, 'gypfiles'))
  global generator_filelist_paths
  generator_filelist_paths = {
    'toplevel': params['options'].toplevel_dir,
    'qualified_out_dir': qualified_out_dir,
  }
# The .d checking code below uses these functions:
# wildcard, sort, foreach, shell, wordlist
# wildcard can handle spaces, the rest can't.
# Since I could find no way to make foreach work with spaces in filenames
# correctly, the .d files have spaces replaced with another character.  The .d
# file for
#   Chromium\ Framework.framework/foo
# is for example
#   out/Release/.deps/out/Release/Chromium?Framework.framework/foo
# This is the replacement character.  The generated Makefile's
# replace_spaces/unreplace_spaces helpers convert in both directions.
SPACE_REPLACEMENT = '?'
LINK_COMMANDS_LINUX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
# We support two kinds of shared objects (.so):
# 1) shared_library, which is just bundling together many dependent libraries
# into a link line.
# 2) loadable_module, which is generating a module intended for dlopen().
#
# They differ only slightly:
# In the former case, we want to package all dependent code into the .so.
# In the latter case, we want to package just the API exposed by the
# outermost module.
# This means shared_library uses --whole-archive, while loadable_module doesn't.
# (Note that --whole-archive is incompatible with the --start-group used in
# normal linking.)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
"""
LINK_COMMANDS_MAC = """\
quiet_cmd_alink = LIBTOOL-STATIC $@
cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -bundle $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_ANDROID = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
quiet_cmd_link_host = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
cmd_link_host = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
quiet_cmd_solink_module_host = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module_host = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_AIX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
# Header of toplevel Makefile.
# This should go into the build tree, but it's easier to keep it here for now.
SHARED_HEADER = ("""\
# We borrow heavily from the kernel build setup, though we are simpler since
# we don't have Kconfig tweaking settings on us.
# The implicit make rules have it looking for RCS files, among other things.
# We instead explicitly write all the rules we care about.
# It's even quicker (saves ~200ms) to pass -r on the command line.
MAKEFLAGS=-r
# The source directory tree.
srcdir := %(srcdir)s
abs_srcdir := $(abspath $(srcdir))
# The name of the builddir.
builddir_name ?= %(builddir)s
# The V=1 flag on command line makes us verbosely print command lines.
ifdef V
quiet=
else
quiet=quiet_
endif
# Specify BUILDTYPE=Release on the command line for a release build.
BUILDTYPE ?= %(default_configuration)s
# Directory all our build output goes into.
# Note that this must be two directories beneath src/ for unit tests to pass,
# as they reach into the src/ directory for data with relative paths.
builddir ?= $(builddir_name)/$(BUILDTYPE)
abs_builddir := $(abspath $(builddir))
depsdir := $(builddir)/.deps
# Object output directory.
obj := $(builddir)/obj
abs_obj := $(abspath $(obj))
# We build up a list of every single one of the targets so we can slurp in the
# generated dependency rule Makefiles in one pass.
all_deps :=
%(make_global_settings)s
CC.target ?= %(CC.target)s
CFLAGS.target ?= $(CPPFLAGS) $(CFLAGS)
CXX.target ?= %(CXX.target)s
CXXFLAGS.target ?= $(CPPFLAGS) $(CXXFLAGS)
LINK.target ?= %(LINK.target)s
LDFLAGS.target ?= $(LDFLAGS)
AR.target ?= $(AR)
# C++ apps need to be linked with g++.
LINK ?= $(CXX.target)
# TODO(evan): move all cross-compilation logic to gyp-time so we don't need
# to replicate this environment fallback in make as well.
CC.host ?= %(CC.host)s
CFLAGS.host ?= $(CPPFLAGS_host) $(CFLAGS_host)
CXX.host ?= %(CXX.host)s
CXXFLAGS.host ?= $(CPPFLAGS_host) $(CXXFLAGS_host)
LINK.host ?= %(LINK.host)s
LDFLAGS.host ?=
AR.host ?= %(AR.host)s
# Define a dir function that can handle spaces.
# http://www.gnu.org/software/make/manual/make.html#Syntax-of-Functions
# "leading spaces cannot appear in the text of the first argument as written.
# These characters can be put into the argument value by variable substitution."
empty :=
space := $(empty) $(empty)
# http://stackoverflow.com/questions/1189781/using-make-dir-or-notdir-on-a-path-with-spaces
replace_spaces = $(subst $(space),""" + SPACE_REPLACEMENT + """,$1)
unreplace_spaces = $(subst """ + SPACE_REPLACEMENT + """,$(space),$1)
dirx = $(call unreplace_spaces,$(dir $(call replace_spaces,$1)))
# Flags to make gcc output dependency info. Note that you need to be
# careful here to use the flags that ccache and distcc can understand.
# We write to a dep file on the side first and then rename at the end
# so we can't end up with a broken dep file.
depfile = $(depsdir)/$(call replace_spaces,$@).d
DEPFLAGS = -MMD -MF $(depfile).raw
# We have to fixup the deps output in a few ways.
# (1) the file output should mention the proper .o file.
# ccache or distcc lose the path to the target, so we convert a rule of
# the form:
# foobar.o: DEP1 DEP2
# into
# path/to/foobar.o: DEP1 DEP2
# (2) we want missing files not to cause us to fail to build.
# We want to rewrite
# foobar.o: DEP1 DEP2 \\
# DEP3
# to
# DEP1:
# DEP2:
# DEP3:
# so if the files are missing, they're just considered phony rules.
# We have to do some pretty insane escaping to get those backslashes
# and dollar signs past make, the shell, and sed at the same time.
# Doesn't work with spaces, but that's fine: .d files have spaces in
# their names replaced with other characters."""
r"""
define fixup_dep
# The depfile may not exist if the input file didn't have any #includes.
touch $(depfile).raw
# Fixup path as in (1).
sed -e "s|^$(notdir $@)|$@|" $(depfile).raw >> $(depfile)
# Add extra rules as in (2).
# We remove slashes and replace spaces with new lines;
# remove blank lines;
# delete the first line and append a colon to the remaining lines.
sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\
grep -v '^$$' |\
sed -e 1d -e 's|$$|:|' \
>> $(depfile)
rm $(depfile).raw
endef
"""
"""
# Command definitions:
# - cmd_foo is the actual command to run;
# - quiet_cmd_foo is the brief-output summary of the command.
quiet_cmd_cc = CC($(TOOLSET)) $@
cmd_cc = $(CC.$(TOOLSET)) $(GYP_CFLAGS) $(DEPFLAGS) $(CFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_cxx = CXX($(TOOLSET)) $@
cmd_cxx = $(CXX.$(TOOLSET)) $(GYP_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
%(extra_commands)s
quiet_cmd_touch = TOUCH $@
cmd_touch = touch $@
quiet_cmd_copy = COPY $@
# send stderr to /dev/null to ignore messages when linking directories.
cmd_copy = rm -rf "$@" && cp %(copy_archive_args)s "$<" "$@"
%(link_commands)s
"""
r"""
# Define an escape_quotes function to escape single quotes.
# This allows us to handle quotes properly as long as we always use
# use single quotes and escape_quotes.
escape_quotes = $(subst ','\'',$(1))
# This comment is here just to include a ' to unconfuse syntax highlighting.
# Define an escape_vars function to escape '$' variable syntax.
# This allows us to read/write command lines with shell variables (e.g.
# $LD_LIBRARY_PATH), without triggering make substitution.
escape_vars = $(subst $$,$$$$,$(1))
# Helper that expands to a shell command to echo a string exactly as it is in
# make. This uses printf instead of echo because printf's behaviour with respect
# to escape sequences is more portable than echo's across different shells
# (e.g., dash, bash).
exact_echo = printf '%%s\n' '$(call escape_quotes,$(1))'
"""
"""
# Helper to compare the command we're about to run against the command
# we logged the last time we ran the command. Produces an empty
# string (false) when the commands match.
# Tricky point: Make has no string-equality test function.
# The kernel uses the following, but it seems like it would have false
# positives, where one string reordered its arguments.
# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \\
# $(filter-out $(cmd_$@), $(cmd_$(1))))
# We instead substitute each for the empty string into the other, and
# say they're equal if both substitutions produce the empty string.
# .d files contain """ + SPACE_REPLACEMENT + \
""" instead of spaces, take that into account.
command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$(call replace_spaces,$@))),\\
$(subst $(cmd_$(call replace_spaces,$@)),,$(cmd_$(1))))
# Helper that is non-empty when a prerequisite changes.
# Normally make does this implicitly, but we force rules to always run
# so we can check their command lines.
# $? -- new prerequisites
# $| -- order-only dependencies
prereq_changed = $(filter-out FORCE_DO_CMD,$(filter-out $|,$?))
# Helper that executes all postbuilds until one fails.
define do_postbuilds
@E=0;\\
for p in $(POSTBUILDS); do\\
eval $$p;\\
E=$$?;\\
if [ $$E -ne 0 ]; then\\
break;\\
fi;\\
done;\\
if [ $$E -ne 0 ]; then\\
rm -rf "$@";\\
exit $$E;\\
fi
endef
# do_cmd: run a command via the above cmd_foo names, if necessary.
# Should always run for a given target to handle command-line changes.
# Second argument, if non-zero, makes it do asm/C/C++ dependency munging.
# Third argument, if non-zero, makes it do POSTBUILDS processing.
# Note: We intentionally do NOT call dirx for depfile, since it contains """ + \
SPACE_REPLACEMENT + """ for
# spaces already and dirx strips the """ + SPACE_REPLACEMENT + \
""" characters.
define do_cmd
$(if $(or $(command_changed),$(prereq_changed)),
@$(call exact_echo, $($(quiet)cmd_$(1)))
@mkdir -p "$(call dirx,$@)" "$(dir $(depfile))"
$(if $(findstring flock,$(word %(flock_index)d,$(cmd_$1))),
@$(cmd_$(1))
@echo " $(quiet_cmd_$(1)): Finished",
@$(cmd_$(1))
)
@$(call exact_echo,$(call escape_vars,cmd_$(call replace_spaces,$@) := $(cmd_$(1)))) > $(depfile)
@$(if $(2),$(fixup_dep))
$(if $(and $(3), $(POSTBUILDS)),
$(call do_postbuilds)
)
)
endef
# Declare the "%(default_target)s" target first so it is the default,
# even though we don't have the deps yet.
.PHONY: %(default_target)s
%(default_target)s:
# make looks for ways to re-generate included makefiles, but in our case, we
# don't have a direct way. Explicitly telling make that it has nothing to do
# for them makes it go faster.
%%.d: ;
# Use FORCE_DO_CMD to force a target to run. Should be coupled with
# do_cmd.
.PHONY: FORCE_DO_CMD
FORCE_DO_CMD:
""")
SHARED_HEADER_MAC_COMMANDS = """
quiet_cmd_objc = CXX($(TOOLSET)) $@
cmd_objc = $(CC.$(TOOLSET)) $(GYP_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_objcxx = CXX($(TOOLSET)) $@
cmd_objcxx = $(CXX.$(TOOLSET)) $(GYP_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# Commands for precompiled header files.
quiet_cmd_pch_c = CXX($(TOOLSET)) $@
cmd_pch_c = $(CC.$(TOOLSET)) $(GYP_PCH_CFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_cc = CXX($(TOOLSET)) $@
cmd_pch_cc = $(CC.$(TOOLSET)) $(GYP_PCH_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_m = CXX($(TOOLSET)) $@
cmd_pch_m = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_pch_mm = CXX($(TOOLSET)) $@
cmd_pch_mm = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# gyp-mac-tool is written next to the root Makefile by gyp.
# Use $(4) for the command, since $(2) and $(3) are used as flag by do_cmd
# already.
quiet_cmd_mac_tool = MACTOOL $(4) $<
cmd_mac_tool = ./gyp-mac-tool $(4) $< "$@"
quiet_cmd_mac_package_framework = PACKAGE FRAMEWORK $@
cmd_mac_package_framework = ./gyp-mac-tool package-framework "$@" $(4)
quiet_cmd_infoplist = INFOPLIST $@
cmd_infoplist = $(CC.$(TOOLSET)) -E -P -Wno-trigraphs -x c $(INFOPLIST_DEFINES) "$<" -o "$@"
"""
def WriteRootHeaderSuffixRules(writer):
  """Emit the top-level make suffix rules that compile sources into $(obj).

  Three families are written: sources under $(srcdir), generated sources
  under $(obj).$(TOOLSET), and generated sources under $(obj).
  """
  extensions = sorted(COMPILABLE_EXTENSIONS.keys(), key=str.lower)
  def _EmitRules(pattern):
    # One pattern rule plus its do_cmd recipe per known source extension.
    for ext in extensions:
      writer.write(pattern % ext)
      writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
  writer.write('# Suffix rules, putting all outputs into $(obj).\n')
  _EmitRules('$(obj).$(TOOLSET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD\n')
  writer.write('\n# Try building from generated source, too.\n')
  _EmitRules('$(obj).$(TOOLSET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD\n')
  writer.write('\n')
  _EmitRules('$(obj).$(TOOLSET)/%%.o: $(obj)/%%%s FORCE_DO_CMD\n')
  writer.write('\n')
SHARED_HEADER_SUFFIX_RULES_COMMENT1 = ("""\
# Suffix rules, putting all outputs into $(obj).
""")
SHARED_HEADER_SUFFIX_RULES_COMMENT2 = ("""\
# Try building from generated source, too.
""")
SHARED_FOOTER = """\
# "all" is a concatenation of the "all" targets from all the included
# sub-makefiles. This is just here to clarify.
all:
# Add in dependency-tracking rules. $(all_deps) is the list of every single
# target in our tree. Only consider the ones with .d (dependency) info:
d_files := $(wildcard $(foreach f,$(all_deps),$(depsdir)/$(f).d))
ifneq ($(d_files),)
include $(d_files)
endif
"""
header = """\
# This file is generated by gyp; do not edit.
"""
# Maps every compilable file extension to the do_cmd that compiles it.
COMPILABLE_EXTENSIONS = {
  '.c': 'cc',
  '.cc': 'cxx',
  '.cpp': 'cxx',
  '.cxx': 'cxx',
  '.s': 'cc',
  '.S': 'cc',
}
def Compilable(filename):
  """Report whether |filename| is a source we know how to compile (i.e. it
  should appear in OBJS)."""
  return any(filename.endswith(ext) for ext in COMPILABLE_EXTENSIONS)
def Linkable(filename):
  """Report whether |filename| is an object file that belongs on the link
  line."""
  return filename[-2:] == '.o'
def Target(filename):
  """Map a compilable source path to the corresponding .o output path."""
  root, _unused_ext = os.path.splitext(filename)
  return '%s.o' % root
def EscapeShellArgument(s):
  """Single-quote |s| so a POSIX shell interprets it literally.

  Embedded single quotes are rewritten as '\\'' (close quote, escaped quote,
  reopen quote).  Taken from
  http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
  """
  escaped = s.replace("'", "'\\''")
  return "'%s'" % escaped
def EscapeMakeVariableExpansion(s):
  """Double every '$' so make passes the string through without expanding it
  as a variable reference."""
  return '$$'.join(s.split('$'))
def EscapeCppDefine(s):
  """Escape a CPP define so it reaches the compiler unaltered.

  Applies shell quoting first, then make '$' doubling, and finally protects
  '#' — even inside a quoted string make would treat it as the start of a
  comment.
  """
  escaped = EscapeMakeVariableExpansion(EscapeShellArgument(s))
  return escaped.replace('#', r'\#')
def QuoteIfNecessary(string):
  """Wrap |string| in double quotes when it contains one, escaping each
  embedded double quote.

  TODO: Should this ideally be replaced with one or more of the escape
  functions above?
  """
  if '"' not in string:
    return string
  return '"%s"' % string.replace('"', '\\"')
def StringToMakefileVariable(string):
  """Sanitize |string| into a legal make variable name: every character
  outside [a-zA-Z0-9_] becomes an underscore."""
  sanitized = re.sub(r'[^a-zA-Z0-9_]', '_', string)
  return sanitized
# Prefix prepended to relative paths by Sourceify(); reassigned by the
# generator when output lives outside the source tree.
srcdir_prefix = ''
def Sourceify(path):
  """Return |path| expressed relative to the source directory.

  Make-variable references and absolute paths pass through untouched;
  anything else gets the srcdir prefix prepended.
  """
  if '$(' in path or os.path.isabs(path):
    return path
  return srcdir_prefix + path
def QuoteSpaces(s, quote=r'\ '):
  """Escape every space in |s| with |quote| (default: backslash-space, the
  form make expects in prerequisite lists)."""
  return quote.join(s.split(' '))
# TODO: Avoid code duplication with _ValidateSourcesForMSVSProject in msvs.py.
def _ValidateSourcesForOSX(spec, all_sources):
  """Makes sure duplicate basenames are not specified in the source list.

  libtool on OS X emits warnings when a static library contains two compiled
  files with the same basename, so fail fast on that layout.  Only
  static_library targets are checked; everything else returns immediately.

  Arguments:
    spec: The target dictionary containing the properties of the target.
    all_sources: List of source paths for the target.

  Raises:
    GypError: if two compiled sources share a basename.
  """
  if spec.get('type', None) != 'static_library':
    return
  # Group compiled sources by basename (extension stripped).
  basenames = {}
  for source in all_sources:
    name, ext = os.path.splitext(source)
    is_compiled_file = ext in [
        '.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
    if not is_compiled_file:
      continue
    basename = os.path.basename(name)  # Don't include extension.
    basenames.setdefault(basename, []).append(source)
  error = ''
  # Bug fix: was basenames.iteritems(), which raises AttributeError under
  # Python 3; items() behaves identically here on both Python 2 and 3.
  for basename, files in basenames.items():
    if len(files) > 1:
      error += ' %s: %s\n' % (basename, ' '.join(files))
  if error:
    print('static library %s has several files with the same basename:\n' %
          spec['target_name'] + error + 'libtool on OS X will generate' +
          ' warnings for them.')
    raise GypError('Duplicate basenames in sources section, see list above')
# Map from qualified target to path to output.
# Written by MakefileWriter.Write() for every generated target.
target_outputs = {}
# Map from qualified target to any linkable output.  A subset
# of target_outputs.  E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
# Only static_library and shared_library targets are recorded here (see
# MakefileWriter.Write()).
target_link_deps = {}
class MakefileWriter(object):
"""MakefileWriter packages up the writing of one target-specific foobar.mk.
Its only real entry point is Write(), and is mostly used for namespacing.
"""
def __init__(self, generator_flags, flavor):
  """Precompute the per-extension suffix-rule template strings.

  Arguments:
    generator_flags: dict of generator-specific flags (e.g.
        'android_ndk_version').
    flavor: platform flavor string ('mac', 'linux', 'android', ...).
  """
  self.generator_flags = generator_flags
  self.flavor = flavor
  self.suffix_rules_srcdir = {}
  self.suffix_rules_objdir1 = {}
  self.suffix_rules_objdir2 = {}
  # Generate suffix rules for all compilable extensions.
  # NOTE(review): the recipe line inside each template must start with a
  # literal TAB character for make to accept it.
  for ext in COMPILABLE_EXTENSIONS.keys():
    # Suffix rules for source folder.
    self.suffix_rules_srcdir.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD
	@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
    # Suffix rules for generated source files.
    self.suffix_rules_objdir1.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD
	@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
    self.suffix_rules_objdir2.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj)/%%%s FORCE_DO_CMD
	@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
def Write(self, qualified_target, base_path, output_filename, spec, configs,
          part_of_all):
  """The main entry point: writes a .mk file for a single target.

  Arguments:
    qualified_target: target we're generating
    base_path: path relative to source root we're building in, used to resolve
               target-relative paths
    output_filename: output .mk file name to write
    spec, configs: gyp info
    part_of_all: flag indicating this target is part of 'all'
  """
  gyp.common.EnsureDirExists(output_filename)
  self.fp = open(output_filename, 'w')
  self.fp.write(header)
  self.qualified_target = qualified_target
  self.path = base_path
  self.target = spec['target_name']
  self.type = spec['type']
  self.toolset = spec['toolset']
  self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
  if self.flavor == 'mac':
    self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
  else:
    self.xcode_settings = None
  deps, link_deps = self.ComputeDeps(spec)
  # Some of the generation below can add extra output, sources, or
  # link dependencies.  All of the out params of the functions that
  # follow use names like extra_foo.
  extra_outputs = []
  extra_sources = []
  extra_link_deps = []
  extra_mac_bundle_resources = []
  mac_bundle_deps = []
  if self.is_mac_bundle:
    self.output = self.ComputeMacBundleOutput(spec)
    self.output_binary = self.ComputeMacBundleBinaryOutput(spec)
  else:
    self.output = self.output_binary = self.ComputeOutput(spec)
  self.is_standalone_static_library = bool(
      spec.get('standalone_static_library', 0))
  self._INSTALLABLE_TARGETS = ('executable', 'loadable_module',
                               'shared_library')
  # Installable targets (and standalone static libraries) are aliased by
  # basename; everything else is addressed by its full output path.
  if (self.is_standalone_static_library or
      self.type in self._INSTALLABLE_TARGETS):
    self.alias = os.path.basename(self.output)
    install_path = self._InstallableTargetInstallPath()
  else:
    self.alias = self.output
    install_path = self.output
  self.WriteLn("TOOLSET := " + self.toolset)
  self.WriteLn("TARGET := " + self.target)
  # Actions must come first, since they can generate more OBJs for use below.
  if 'actions' in spec:
    self.WriteActions(spec['actions'], extra_sources, extra_outputs,
                      extra_mac_bundle_resources, part_of_all)
  # Rules must be early like actions.
  if 'rules' in spec:
    self.WriteRules(spec['rules'], extra_sources, extra_outputs,
                    extra_mac_bundle_resources, part_of_all)
  if 'copies' in spec:
    self.WriteCopies(spec['copies'], extra_outputs, part_of_all)
  # Bundle resources.
  if self.is_mac_bundle:
    all_mac_bundle_resources = (
        spec.get('mac_bundle_resources', []) + extra_mac_bundle_resources)
    self.WriteMacBundleResources(all_mac_bundle_resources, mac_bundle_deps)
    self.WriteMacInfoPlist(mac_bundle_deps)
  # Sources.
  all_sources = spec.get('sources', []) + extra_sources
  if all_sources:
    if self.flavor == 'mac':
      # libtool on OS X generates warnings for duplicate basenames in the same
      # target.
      _ValidateSourcesForOSX(spec, all_sources)
    self.WriteSources(
        configs, deps, all_sources, extra_outputs,
        extra_link_deps, part_of_all,
        gyp.xcode_emulation.MacPrefixHeader(
            self.xcode_settings, lambda p: Sourceify(self.Absolutify(p)),
            self.Pchify))
    # NOTE(review): Python 2 semantics -- filter() returns a list here, so
    # the truth test below means "any compilable sources".
    sources = filter(Compilable, all_sources)
    if sources:
      self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT1)
      extensions = set([os.path.splitext(s)[1] for s in sources])
      for ext in extensions:
        if ext in self.suffix_rules_srcdir:
          self.WriteLn(self.suffix_rules_srcdir[ext])
      self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT2)
      for ext in extensions:
        if ext in self.suffix_rules_objdir1:
          self.WriteLn(self.suffix_rules_objdir1[ext])
      for ext in extensions:
        if ext in self.suffix_rules_objdir2:
          self.WriteLn(self.suffix_rules_objdir2[ext])
      self.WriteLn('# End of this set of suffix rules')
  # Add dependency from bundle to bundle binary.
  if self.is_mac_bundle:
    mac_bundle_deps.append(self.output_binary)
  self.WriteTarget(spec, configs, deps, extra_link_deps + link_deps,
                   mac_bundle_deps, extra_outputs, part_of_all)
  # Update global list of target outputs, used in dependency tracking.
  target_outputs[qualified_target] = install_path
  # Update global list of link dependencies.
  if self.type in ('static_library', 'shared_library'):
    target_link_deps[qualified_target] = self.output_binary
  # Currently any versions have the same effect, but in future the behavior
  # could be different.
  if self.generator_flags.get('android_ndk_version', None):
    self.WriteAndroidNdkModuleRule(self.target, all_sources, link_deps)
  self.fp.close()
def WriteSubMake(self, output_filename, makefile_path, targets, build_dir):
  """Write a "sub-project" Makefile.

  This is a small wrapper Makefile that re-invokes the top-level Makefile to
  build the targets from a single gyp file (i.e. a sub-project).

  Arguments:
    output_filename: sub-project Makefile name to write
    makefile_path: path to the top-level Makefile
    targets: list of "all" targets for this sub-project
    build_dir: build output directory, relative to the sub-project
  """
  gyp.common.EnsureDirExists(output_filename)
  self.fp = open(output_filename, 'w')
  self.fp.write(header)
  # For consistency with other builders, put sub-project build output in the
  # sub-project dir (see test/subdirectory/gyptest-subdir-all.py).
  sub_builddir = os.path.join(os.path.dirname(output_filename), build_dir)
  self.WriteLn('export builddir_name ?= %s' % sub_builddir)
  self.WriteLn('.PHONY: all')
  self.WriteLn('all:')
  chdir_flag = ' -C ' + makefile_path if makefile_path else makefile_path
  self.WriteLn('\t$(MAKE)%s %s' % (chdir_flag, ' '.join(targets)))
  self.fp.close()
def WriteActions(self, actions, extra_sources, extra_outputs,
                 extra_mac_bundle_resources, part_of_all):
  """Write Makefile code for any 'actions' from the gyp input.

  Arguments:
    actions: list of action dicts from the gyp input
    extra_sources: a list that will be filled in with newly generated source
                   files, if any
    extra_outputs: a list that will be filled in with any outputs of these
                   actions (used to make other pieces dependent on these
                   actions)
    extra_mac_bundle_resources: a list that will be filled in with outputs
                                flagged as mac bundle resources
    part_of_all: flag indicating this target is part of 'all'
  """
  env = self.GetSortedXcodeEnv()
  for action in actions:
    name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
                                               action['action_name']))
    self.WriteLn('### Rules for action "%s":' % action['action_name'])
    inputs = action['inputs']
    outputs = action['outputs']
    # Build up a list of outputs.
    # Collect the output dirs we'll need.
    dirs = set()
    for out in outputs:
      dir = os.path.split(out)[0]
      if dir:
        dirs.add(dir)
    if int(action.get('process_outputs_as_sources', False)):
      extra_sources += outputs
    if int(action.get('process_outputs_as_mac_bundle_resources', False)):
      extra_mac_bundle_resources += outputs
    # Write the actual command.
    action_commands = action['action']
    if self.flavor == 'mac':
      action_commands = [gyp.xcode_emulation.ExpandEnvVars(command, env)
                         for command in action_commands]
    command = gyp.common.EncodePOSIXShellList(action_commands)
    if 'message' in action:
      self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, action['message']))
    else:
      self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, name))
    # Make the output directories before running the action.
    if len(dirs) > 0:
      command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
    cd_action = 'cd %s; ' % Sourceify(self.path or '.')
    # command and cd_action get written to a toplevel variable called
    # cmd_foo.  Toplevel variables can't handle things that change per
    # makefile like $(TARGET), so hardcode the target.
    command = command.replace('$(TARGET)', self.target)
    cd_action = cd_action.replace('$(TARGET)', self.target)
    # Set LD_LIBRARY_PATH in case the action runs an executable from this
    # build which links to shared libs from this build.
    # actions run on the host, so they should in theory only use host
    # libraries, but until everything is made cross-compile safe, also use
    # target libraries.
    # TODO(piman): when everything is cross-compile safe, remove lib.target
    self.WriteLn('cmd_%s = LD_LIBRARY_PATH=$(builddir)/lib.host:'
                 '$(builddir)/lib.target:$$LD_LIBRARY_PATH; '
                 'export LD_LIBRARY_PATH; '
                 '%s%s'
                 % (name, cd_action, command))
    self.WriteLn()
    outputs = map(self.Absolutify, outputs)
    # The makefile rules are all relative to the top dir, but the gyp actions
    # are defined relative to their containing dir.  This replaces the obj
    # variable for the action rule with an absolute version so that the output
    # goes in the right place.
    # Only write the 'obj' and 'builddir' rules for the "primary" output (:1);
    # it's superfluous for the "extra outputs", and this avoids accidentally
    # writing duplicate dummy rules for those outputs.
    # Same for environment.
    self.WriteLn("%s: obj := $(abs_obj)" % QuoteSpaces(outputs[0]))
    self.WriteLn("%s: builddir := $(abs_builddir)" % QuoteSpaces(outputs[0]))
    self.WriteSortedXcodeEnv(outputs[0], self.GetSortedXcodeEnv())
    for input in inputs:
      assert ' ' not in input, (
          "Spaces in action input filenames not supported (%s)" % input)
    for output in outputs:
      assert ' ' not in output, (
          "Spaces in action output filenames not supported (%s)" % output)
    # See the comment in WriteCopies about expanding env vars.
    outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
    inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
    self.WriteDoCmd(outputs, map(Sourceify, map(self.Absolutify, inputs)),
                    part_of_all=part_of_all, command=name)
    # Stuff the outputs in a variable so we can refer to them later.
    outputs_variable = 'action_%s_outputs' % name
    self.WriteLn('%s := %s' % (outputs_variable, ' '.join(outputs)))
    extra_outputs.append('$(%s)' % outputs_variable)
    self.WriteLn()
  self.WriteLn()
  def WriteRules(self, rules, extra_sources, extra_outputs,
                 extra_mac_bundle_resources, part_of_all):
    """Write Makefile code for any 'rules' from the gyp input.

    rules: the list of rule dicts from the gyp spec; each has a 'rule_name',
           an 'action', 'outputs' templates, and 'rule_sources' to apply to.
    extra_sources: a list that will be filled in with newly generated source
                   files, if any
    extra_outputs: a list that will be filled in with any outputs of these
                   rules (used to make other pieces dependent on these rules)
    part_of_all: flag indicating this target is part of 'all'
    """
    # Xcode-style env vars that may appear in rule inputs/outputs; expanded
    # at gyp time (see the comment in WriteCopies).
    env = self.GetSortedXcodeEnv()
    for rule in rules:
      name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
                                                 rule['rule_name']))
      # Per-source counter; each (rule, source) pair gets its own cmd_NAME_N.
      count = 0
      self.WriteLn('### Generated for rule %s:' % name)

      all_outputs = []

      for rule_source in rule.get('rule_sources', []):
        dirs = set()
        (rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
        (rule_source_root, rule_source_ext) = \
            os.path.splitext(rule_source_basename)

        # Instantiate the output templates for this particular source file.
        outputs = [self.ExpandInputRoot(out, rule_source_root,
                                        rule_source_dirname)
                   for out in rule['outputs']]

        # Collect output directories so the command can mkdir -p them first.
        for out in outputs:
          dir = os.path.dirname(out)
          if dir:
            dirs.add(dir)
        if int(rule.get('process_outputs_as_sources', False)):
          extra_sources += outputs
        if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
          extra_mac_bundle_resources += outputs
        # NOTE: relies on Python 2 map() returning a list (indexed below).
        inputs = map(Sourceify, map(self.Absolutify, [rule_source] +
                                    rule.get('inputs', [])))
        actions = ['$(call do_cmd,%s_%d)' % (name, count)]

        if name == 'resources_grit':
          # HACK: This is ugly.  Grit intentionally doesn't touch the
          # timestamp of its output file when the file doesn't change,
          # which is fine in hash-based dependency systems like scons
          # and forge, but not kosher in the make world.  After some
          # discussion, hacking around it here seems like the least
          # amount of pain.
          actions += ['@touch --no-create $@']

        # See the comment in WriteCopies about expanding env vars.
        outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
        inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]

        outputs = map(self.Absolutify, outputs)
        all_outputs += outputs
        # Only write the 'obj' and 'builddir' rules for the "primary" output
        # (:1); it's superfluous for the "extra outputs", and this avoids
        # accidentally writing duplicate dummy rules for those outputs.
        self.WriteLn('%s: obj := $(abs_obj)' % outputs[0])
        self.WriteLn('%s: builddir := $(abs_builddir)' % outputs[0])
        self.WriteMakeRule(outputs, inputs, actions,
                           command="%s_%d" % (name, count))
        # Spaces in rule filenames are not supported, but rule variables have
        # spaces in them (e.g. RULE_INPUT_PATH expands to '$(abspath $<)').
        # The spaces within the variables are valid, so remove the variables
        # before checking.
        variables_with_spaces = re.compile(r'\$\([^ ]* \$<\)')
        for output in outputs:
          output = re.sub(variables_with_spaces, '', output)
          assert ' ' not in output, (
              "Spaces in rule filenames not yet supported (%s)" % output)
        self.WriteLn('all_deps += %s' % ' '.join(outputs))

        action = [self.ExpandInputRoot(ac, rule_source_root,
                                       rule_source_dirname)
                  for ac in rule['action']]
        mkdirs = ''
        if len(dirs) > 0:
          mkdirs = 'mkdir -p %s; ' % ' '.join(dirs)
        cd_action = 'cd %s; ' % Sourceify(self.path or '.')

        # action, cd_action, and mkdirs get written to a toplevel variable
        # called cmd_foo. Toplevel variables can't handle things that change
        # per makefile like $(TARGET), so hardcode the target.
        if self.flavor == 'mac':
          action = [gyp.xcode_emulation.ExpandEnvVars(command, env)
                    for command in action]
        action = gyp.common.EncodePOSIXShellList(action)
        action = action.replace('$(TARGET)', self.target)
        cd_action = cd_action.replace('$(TARGET)', self.target)
        mkdirs = mkdirs.replace('$(TARGET)', self.target)

        # Set LD_LIBRARY_PATH in case the rule runs an executable from this
        # build which links to shared libs from this build.
        # rules run on the host, so they should in theory only use host
        # libraries, but until everything is made cross-compile safe, also use
        # target libraries.
        # TODO(piman): when everything is cross-compile safe, remove lib.target
        self.WriteLn(
            "cmd_%(name)s_%(count)d = LD_LIBRARY_PATH="
            "$(builddir)/lib.host:$(builddir)/lib.target:$$LD_LIBRARY_PATH; "
            "export LD_LIBRARY_PATH; "
            "%(cd_action)s%(mkdirs)s%(action)s" % {
          'action': action,
          'cd_action': cd_action,
          'count': count,
          'mkdirs': mkdirs,
          'name': name,
        })
        self.WriteLn(
            'quiet_cmd_%(name)s_%(count)d = RULE %(name)s_%(count)d $@' % {
          'count': count,
          'name': name,
        })
        self.WriteLn()
        count += 1

      # Export all outputs of this rule through a single make variable so
      # later pieces (e.g. compilation) can depend on them.
      outputs_variable = 'rule_%s_outputs' % name
      self.WriteList(all_outputs, outputs_variable)
      extra_outputs.append('$(%s)' % outputs_variable)

      self.WriteLn('### Finished generating for rule: %s' % name)
      self.WriteLn()
    self.WriteLn('### Finished generating for all rules')
    self.WriteLn('')
  def WriteCopies(self, copies, extra_outputs, part_of_all):
    """Write Makefile code for any 'copies' from the gyp input.

    copies: list of copy dicts, each with 'files' and a 'destination' dir.
    extra_outputs: a list that will be filled in with any outputs of this action
                   (used to make other pieces dependent on this action)
    part_of_all: flag indicating this target is part of 'all'
    """
    self.WriteLn('### Generated for copy rule.')

    variable = StringToMakefileVariable(self.qualified_target + '_copies')
    outputs = []
    for copy in copies:
      for path in copy['files']:
        # Absolutify() may call normpath, and will strip trailing slashes.
        path = Sourceify(self.Absolutify(path))
        filename = os.path.split(path)[1]
        output = Sourceify(self.Absolutify(os.path.join(copy['destination'],
                                                        filename)))

        # If the output path has variables in it, which happens in practice for
        # 'copies', writing the environment as target-local doesn't work,
        # because the variables are already needed for the target name.
        # Copying the environment variables into global make variables doesn't
        # work either, because then the .d files will potentially contain spaces
        # after variable expansion, and .d file handling cannot handle spaces.
        # As a workaround, manually expand variables at gyp time. Since 'copies'
        # can't run scripts, there's no need to write the env then.
        # WriteDoCmd() will escape spaces for .d files.
        # NOTE(review): env is recomputed for every file; it looks hoistable
        # out of the loops -- confirm GetSortedXcodeEnv() has no per-file
        # state before moving it.
        env = self.GetSortedXcodeEnv()
        output = gyp.xcode_emulation.ExpandEnvVars(output, env)
        path = gyp.xcode_emulation.ExpandEnvVars(path, env)
        self.WriteDoCmd([output], [path], 'copy', part_of_all)
        outputs.append(output)
    self.WriteLn('%s = %s' % (variable, ' '.join(map(QuoteSpaces, outputs))))
    extra_outputs.append('$(%s)' % variable)
    self.WriteLn()
  def WriteMacBundleResources(self, resources, bundle_deps):
    """Writes Makefile code for 'mac_bundle_resources'.

    resources: list of resource files from the gyp spec.
    bundle_deps: list that collects every file the bundle depends on; the
                 copied resource outputs are appended to it.
    """
    self.WriteLn('### Generated for mac_bundle_resources')

    for output, res in gyp.xcode_emulation.GetMacBundleResources(
        generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
        map(Sourceify, map(self.Absolutify, resources))):
      _, ext = os.path.splitext(output)
      if ext != '.xcassets':
        # Make does not support '.xcassets' emulation.
        self.WriteDoCmd([output], [res], 'mac_tool,,,copy-bundle-resource',
                        part_of_all=True)
        bundle_deps.append(output)
  def WriteMacInfoPlist(self, bundle_deps):
    """Write Makefile code for bundle Info.plist files.

    bundle_deps: list that collects bundle dependencies; the copied plist
                 output is appended to it.
    """
    info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
        generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
        lambda p: Sourceify(self.Absolutify(p)))
    if not info_plist:
      return
    if defines:
      # Create an intermediate file to store preprocessed results.
      intermediate_plist = ('$(obj).$(TOOLSET)/$(TARGET)/' +
          os.path.basename(info_plist))
      self.WriteList(defines, intermediate_plist + ': INFOPLIST_DEFINES', '-D',
          quoter=EscapeCppDefine)
      self.WriteMakeRule([intermediate_plist], [info_plist],
          ['$(call do_cmd,infoplist)',
           # "Convert" the plist so that any weird whitespace changes from the
           # preprocessor do not affect the XML parser in mac_tool.
           '@plutil -convert xml1 $@ $@'])
      # From here on, the preprocessed copy stands in for the original.
      info_plist = intermediate_plist
    # plists can contain envvars and substitute them into the file.
    self.WriteSortedXcodeEnv(
        out, self.GetSortedXcodeEnv(additional_settings=extra_env))
    self.WriteDoCmd([out], [info_plist], 'mac_tool,,,copy-info-plist',
                    part_of_all=True)
    bundle_deps.append(out)
def WriteSources(self, configs, deps, sources,
extra_outputs, extra_link_deps,
part_of_all, precompiled_header):
"""Write Makefile code for any 'sources' from the gyp input.
These are source files necessary to build the current target.
configs, deps, sources: input from gyp.
extra_outputs: a list of extra outputs this action should be dependent on;
used to serialize action/rules before compilation
extra_link_deps: a list that will be filled in with any outputs of
compilation (to be used in link lines)
part_of_all: flag indicating this target is part of 'all'
"""
# Write configuration-specific variables for CFLAGS, etc.
for configname in sorted(configs.keys()):
config = configs[configname]
self.WriteList(config.get('defines'), 'DEFS_%s' % configname, prefix='-D',
quoter=EscapeCppDefine)
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(configname)
cflags_c = self.xcode_settings.GetCflagsC(configname)
cflags_cc = self.xcode_settings.GetCflagsCC(configname)
cflags_objc = self.xcode_settings.GetCflagsObjC(configname)
cflags_objcc = self.xcode_settings.GetCflagsObjCC(configname)
else:
cflags = config.get('cflags')
cflags_c = config.get('cflags_c')
cflags_cc = config.get('cflags_cc')
self.WriteLn("# Flags passed to all source files.");
self.WriteList(cflags, 'CFLAGS_%s' % configname)
self.WriteLn("# Flags passed to only C files.");
self.WriteList(cflags_c, 'CFLAGS_C_%s' % configname)
self.WriteLn("# Flags passed to only C++ files.");
self.WriteList(cflags_cc, 'CFLAGS_CC_%s' % configname)
if self.flavor == 'mac':
self.WriteLn("# Flags passed to only ObjC files.");
self.WriteList(cflags_objc, 'CFLAGS_OBJC_%s' % configname)
self.WriteLn("# Flags passed to only ObjC++ files.");
self.WriteList(cflags_objcc, 'CFLAGS_OBJCC_%s' % configname)
includes = config.get('include_dirs')
if includes:
includes = map(Sourceify, map(self.Absolutify, includes))
self.WriteList(includes, 'INCS_%s' % configname, prefix='-I')
compilable = filter(Compilable, sources)
objs = map(self.Objectify, map(self.Absolutify, map(Target, compilable)))
self.WriteList(objs, 'OBJS')
for obj in objs:
assert ' ' not in obj, (
"Spaces in object filenames not supported (%s)" % obj)
self.WriteLn('# Add to the list of files we specially track '
'dependencies for.')
self.WriteLn('all_deps += $(OBJS)')
self.WriteLn()
# Make sure our dependencies are built first.
if deps:
self.WriteMakeRule(['$(OBJS)'], deps,
comment = 'Make sure our dependencies are built '
'before any of us.',
order_only = True)
# Make sure the actions and rules run first.
# If they generate any extra headers etc., the per-.o file dep tracking
# will catch the proper rebuilds, so order only is still ok here.
if extra_outputs:
self.WriteMakeRule(['$(OBJS)'], extra_outputs,
comment = 'Make sure our actions/rules run '
'before any of us.',
order_only = True)
pchdeps = precompiled_header.GetObjDependencies(compilable, objs )
if pchdeps:
self.WriteLn('# Dependencies from obj files to their precompiled headers')
for source, obj, gch in pchdeps:
self.WriteLn('%s: %s' % (obj, gch))
self.WriteLn('# End precompiled header dependencies')
if objs:
extra_link_deps.append('$(OBJS)')
self.WriteLn("""\
# CFLAGS et al overrides must be target-local.
# See "Target-specific Variable Values" in the GNU Make manual.""")
self.WriteLn("$(OBJS): TOOLSET := $(TOOLSET)")
self.WriteLn("$(OBJS): GYP_CFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('c') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_CXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('cc') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE))")
if self.flavor == 'mac':
self.WriteLn("$(OBJS): GYP_OBJCFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('m') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE)) "
"$(CFLAGS_OBJC_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_OBJCXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('mm') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE)) "
"$(CFLAGS_OBJCC_$(BUILDTYPE))")
self.WritePchTargets(precompiled_header.GetPchBuildCommands())
# If there are any object files in our input file list, link them into our
# output.
extra_link_deps += filter(Linkable, sources)
self.WriteLn()
  def WritePchTargets(self, pch_commands):
    """Writes make rules to compile prefix headers.

    pch_commands: iterable of (gch_path, lang_flag, lang, input_path) tuples
                  describing each precompiled header to build.
    """
    if not pch_commands:
      return

    # NOTE(review): loop variable 'input' shadows the builtin; harmless here
    # but worth renaming in a later cleanup.
    for gch, lang_flag, lang, input in pch_commands:
      # Language-specific extra flag variables appended to the pch compile.
      extra_flags = {
        'c': '$(CFLAGS_C_$(BUILDTYPE))',
        'cc': '$(CFLAGS_CC_$(BUILDTYPE))',
        'm': '$(CFLAGS_C_$(BUILDTYPE)) $(CFLAGS_OBJC_$(BUILDTYPE))',
        'mm': '$(CFLAGS_CC_$(BUILDTYPE)) $(CFLAGS_OBJCC_$(BUILDTYPE))',
      }[lang]
      var_name = {
        'c': 'GYP_PCH_CFLAGS',
        'cc': 'GYP_PCH_CXXFLAGS',
        'm': 'GYP_PCH_OBJCFLAGS',
        'mm': 'GYP_PCH_OBJCXXFLAGS',
      }[lang]
      # Target-local flags variable for this gch, then the build rule itself.
      self.WriteLn("%s: %s := %s " % (gch, var_name, lang_flag) +
                   "$(DEFS_$(BUILDTYPE)) "
                   "$(INCS_$(BUILDTYPE)) "
                   "$(CFLAGS_$(BUILDTYPE)) " +
                   extra_flags)

      self.WriteLn('%s: %s FORCE_DO_CMD' % (gch, input))
      self.WriteLn('\t@$(call do_cmd,pch_%s,1)' % lang)
      self.WriteLn('')
      assert ' ' not in gch, (
          "Spaces in gch filenames not supported (%s)" % gch)
      self.WriteLn('all_deps += %s' % gch)
      self.WriteLn('')
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
assert not self.is_mac_bundle
if self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
return self.xcode_settings.GetExecutablePath()
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.a'
elif self.type in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.so'
elif self.type == 'none':
target = '%s.stamp' % target
elif self.type != 'executable':
print ("ERROR: What output file should be generated?",
"type", self.type, "target", target)
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
return target_prefix + target + target_ext
def _InstallImmediately(self):
return self.toolset == 'target' and self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module')
def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'$(obj)/baz/libfoobar.so'
"""
assert not self.is_mac_bundle
path = os.path.join('$(obj).' + self.toolset, self.path)
if self.type == 'executable' or self._InstallImmediately():
path = '$(builddir)'
path = spec.get('product_dir', path)
return os.path.join(path, self.ComputeOutputBasename(spec))
def ComputeMacBundleOutput(self, spec):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetWrapperName())
def ComputeMacBundleBinaryOutput(self, spec):
"""Return the 'output' (full output path) to the binary in a bundle."""
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetExecutablePath())
def ComputeDeps(self, spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
# TODO: It seems we need to transitively link in libraries (e.g. -lfoo)?
# This hack makes it work:
# link_deps.extend(spec.get('libraries', []))
return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
  def WriteDependencyOnExtraOutputs(self, target, extra_outputs):
    """Make this target's binary depend (order-only) on the extra outputs
    produced by its actions/rules, so those always run first.

    NOTE(review): the |target| parameter is unused -- the rule is written
    against self.output_binary. Confirm whether |target| should be used
    instead before cleaning this up.
    """
    self.WriteMakeRule([self.output_binary], extra_outputs,
                       comment = 'Build our special outputs first.',
                       order_only = True)
  def WriteTarget(self, spec, configs, deps, link_deps, bundle_deps,
                  extra_outputs, part_of_all):
    """Write Makefile code to produce the final target of the gyp spec.

    spec, configs: input from gyp.
    deps, link_deps: dependency lists; see ComputeDeps()
    bundle_deps: for mac bundles, everything the bundle directory depends on
    extra_outputs: any extra outputs that our target should depend on
    part_of_all: flag indicating this target is part of 'all'
    """
    self.WriteLn('### Rules for final target.')

    if extra_outputs:
      self.WriteDependencyOnExtraOutputs(self.output_binary, extra_outputs)
      self.WriteMakeRule(extra_outputs, deps,
                         comment=('Preserve order dependency of '
                                  'special output on deps.'),
                         order_only = True)

    target_postbuilds = {}
    if self.type != 'none':
      # Per-configuration link flags (LDFLAGS_<config>).
      for configname in sorted(configs.keys()):
        config = configs[configname]
        if self.flavor == 'mac':
          ldflags = self.xcode_settings.GetLdflags(configname,
              generator_default_variables['PRODUCT_DIR'],
              lambda p: Sourceify(self.Absolutify(p)))

          # TARGET_POSTBUILDS_$(BUILDTYPE) is added to postbuilds later on.
          gyp_to_build = gyp.common.InvertRelativePath(self.path)
          target_postbuild = self.xcode_settings.AddImplicitPostbuilds(
              configname,
              QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
                                                        self.output))),
              QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
                                                        self.output_binary))))
          if target_postbuild:
            target_postbuilds[configname] = target_postbuild
        else:
          ldflags = config.get('ldflags', [])
          # Compute an rpath for this output if needed.
          if any(dep.endswith('.so') or '.so.' in dep for dep in deps):
            # We want to get the literal string "$ORIGIN" into the link command,
            # so we need lots of escaping.
            ldflags.append(r'-Wl,-rpath=\$$ORIGIN/lib.%s/' % self.toolset)
            ldflags.append(r'-Wl,-rpath-link=\$(builddir)/lib.%s/' %
                           self.toolset)
        library_dirs = config.get('library_dirs', [])
        ldflags += [('-L%s' % library_dir) for library_dir in library_dirs]
        self.WriteList(ldflags, 'LDFLAGS_%s' % configname)
      # NOTE(review): this uses |configname| leaked from the loop above, so
      # libtool flags are written only for the last configuration -- confirm
      # whether that is intentional.
      if self.flavor == 'mac':
        self.WriteList(self.xcode_settings.GetLibtoolflags(configname),
                       'LIBTOOLFLAGS_%s' % configname)
      libraries = spec.get('libraries')
      if libraries:
        # Remove duplicate entries
        libraries = gyp.common.uniquer(libraries)
        if self.flavor == 'mac':
          libraries = self.xcode_settings.AdjustLibraries(libraries)
      self.WriteList(libraries, 'LIBS')
      self.WriteLn('%s: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))' %
          QuoteSpaces(self.output_binary))
      self.WriteLn('%s: LIBS := $(LIBS)' % QuoteSpaces(self.output_binary))
      if self.flavor == 'mac':
        self.WriteLn('%s: GYP_LIBTOOLFLAGS := $(LIBTOOLFLAGS_$(BUILDTYPE))' %
            QuoteSpaces(self.output_binary))

    # Postbuild actions. Like actions, but implicitly depend on the target's
    # output.
    postbuilds = []
    if self.flavor == 'mac':
      if target_postbuilds:
        postbuilds.append('$(TARGET_POSTBUILDS_$(BUILDTYPE))')
      postbuilds.extend(
          gyp.xcode_emulation.GetSpecPostbuildCommands(spec))

    if postbuilds:
      # Envvars may be referenced by TARGET_POSTBUILDS_$(BUILDTYPE),
      # so we must output its definition first, since we declare variables
      # using ":=".
      self.WriteSortedXcodeEnv(self.output, self.GetSortedXcodePostbuildEnv())

      for configname in target_postbuilds:
        self.WriteLn('%s: TARGET_POSTBUILDS_%s := %s' %
            (QuoteSpaces(self.output),
             configname,
             gyp.common.EncodePOSIXShellList(target_postbuilds[configname])))

      # Postbuilds expect to be run in the gyp file's directory, so insert an
      # implicit postbuild to cd to there.
      postbuilds.insert(0, gyp.common.EncodePOSIXShellList(['cd', self.path]))
      for i in xrange(len(postbuilds)):
        if not postbuilds[i].startswith('$'):
          postbuilds[i] = EscapeShellArgument(postbuilds[i])
      self.WriteLn('%s: builddir := $(abs_builddir)' % QuoteSpaces(self.output))
      self.WriteLn('%s: POSTBUILDS := %s' % (
          QuoteSpaces(self.output), ' '.join(postbuilds)))

    # A bundle directory depends on its dependencies such as bundle resources
    # and bundle binary. When all dependencies have been built, the bundle
    # needs to be packaged.
    if self.is_mac_bundle:
      # If the framework doesn't contain a binary, then nothing depends
      # on the actions -- make the framework depend on them directly too.
      self.WriteDependencyOnExtraOutputs(self.output, extra_outputs)

      # Bundle dependencies. Note that the code below adds actions to this
      # target, so if you move these two lines, move the lines below as well.
      self.WriteList(map(QuoteSpaces, bundle_deps), 'BUNDLE_DEPS')
      self.WriteLn('%s: $(BUNDLE_DEPS)' % QuoteSpaces(self.output))

      # After the framework is built, package it. Needs to happen before
      # postbuilds, since postbuilds depend on this.
      if self.type in ('shared_library', 'loadable_module'):
        self.WriteLn('\t@$(call do_cmd,mac_package_framework,,,%s)' %
            self.xcode_settings.GetFrameworkVersion())

      # Bundle postbuilds can depend on the whole bundle, so run them after
      # the bundle is packaged, not already after the bundle binary is done.
      if postbuilds:
        self.WriteLn('\t@$(call do_postbuilds)')
      postbuilds = []  # Don't write postbuilds for target's output.

      # Needed by test/mac/gyptest-rebuild.py.
      self.WriteLn('\t@true  # No-op, used by tests')

      # Since this target depends on binary and resources which are in
      # nested subfolders, the framework directory will be older than
      # its dependencies usually. To prevent this rule from executing
      # on every build (expensive, especially with postbuilds), expliclity
      # update the time on the framework directory.
      self.WriteLn('\t@touch -c %s' % QuoteSpaces(self.output))

    if postbuilds:
      assert not self.is_mac_bundle, ('Postbuilds for bundles should be done '
          'on the bundle, not the binary (target \'%s\')' % self.target)
      assert 'product_dir' not in spec, ('Postbuilds do not work with '
          'custom product_dir')

    # Emit the actual link/archive/stamp rule for this target type.
    if self.type == 'executable':
      self.WriteLn('%s: LD_INPUTS := %s' % (
          QuoteSpaces(self.output_binary),
          ' '.join(map(QuoteSpaces, link_deps))))
      if self.toolset == 'host' and self.flavor == 'android':
        self.WriteDoCmd([self.output_binary], link_deps, 'link_host',
                        part_of_all, postbuilds=postbuilds)
      else:
        self.WriteDoCmd([self.output_binary], link_deps, 'link', part_of_all,
                        postbuilds=postbuilds)
    elif self.type == 'static_library':
      for link_dep in link_deps:
        assert ' ' not in link_dep, (
            "Spaces in alink input filenames not supported (%s)" % link_dep)
      if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not
          self.is_standalone_static_library):
        self.WriteDoCmd([self.output_binary], link_deps, 'alink_thin',
                        part_of_all, postbuilds=postbuilds)
      else:
        self.WriteDoCmd([self.output_binary], link_deps, 'alink', part_of_all,
                        postbuilds=postbuilds)
    elif self.type == 'shared_library':
      self.WriteLn('%s: LD_INPUTS := %s' % (
            QuoteSpaces(self.output_binary),
            ' '.join(map(QuoteSpaces, link_deps))))
      self.WriteDoCmd([self.output_binary], link_deps, 'solink', part_of_all,
                      postbuilds=postbuilds)
    elif self.type == 'loadable_module':
      for link_dep in link_deps:
        assert ' ' not in link_dep, (
            "Spaces in module input filenames not supported (%s)" % link_dep)
      if self.toolset == 'host' and self.flavor == 'android':
        self.WriteDoCmd([self.output_binary], link_deps, 'solink_module_host',
                        part_of_all, postbuilds=postbuilds)
      else:
        self.WriteDoCmd(
            [self.output_binary], link_deps, 'solink_module', part_of_all,
            postbuilds=postbuilds)
    elif self.type == 'none':
      # Write a stamp line.
      self.WriteDoCmd([self.output_binary], deps, 'touch', part_of_all,
                      postbuilds=postbuilds)
    else:
      # NOTE(review): 'target' is not defined anywhere in this method's
      # scope, so reaching this branch would raise NameError -- likely
      # should be self.target or spec['target_name']; verify.
      print "WARNING: no output for", self.type, target

    # Add an alias for each target (if there are any outputs).
    # Installable target aliases are created below.
    if ((self.output and self.output != self.target) and
        (self.type not in self._INSTALLABLE_TARGETS)):
      self.WriteMakeRule([self.target], [self.output],
                         comment='Add target alias', phony = True)
      if part_of_all:
        self.WriteMakeRule(['all'], [self.target],
                           comment = 'Add target alias to "all" target.',
                           phony = True)

    # Add special-case rules for our installable targets.
    # 1) They need to install to the build dir or "product" dir.
    # 2) They get shortcuts for building (e.g. "make chrome").
    # 3) They are part of "make all".
    if (self.type in self._INSTALLABLE_TARGETS or
        self.is_standalone_static_library):
      if self.type == 'shared_library':
        file_desc = 'shared library'
      elif self.type == 'static_library':
        file_desc = 'static library'
      else:
        file_desc = 'executable'
      install_path = self._InstallableTargetInstallPath()
      installable_deps = [self.output]
      if (self.flavor == 'mac' and not 'product_dir' in spec and
          self.toolset == 'target'):
        # On mac, products are created in install_path immediately.
        assert install_path == self.output, '%s != %s' % (
            install_path, self.output)

      # Point the target alias to the final binary output.
      self.WriteMakeRule([self.target], [install_path],
                         comment='Add target alias', phony = True)
      if install_path != self.output:
        assert not self.is_mac_bundle  # See comment a few lines above.
        self.WriteDoCmd([install_path], [self.output], 'copy',
                        comment = 'Copy this to the %s output path.' %
                        file_desc, part_of_all=part_of_all)
        installable_deps.append(install_path)
      if self.output != self.alias and self.alias != self.target:
        self.WriteMakeRule([self.alias], installable_deps,
                           comment = 'Short alias for building this %s.' %
                           file_desc, phony = True)
      if part_of_all:
        self.WriteMakeRule(['all'], [install_path],
                           comment = 'Add %s to "all" target.' % file_desc,
                           phony = True)
def WriteList(self, value_list, variable=None, prefix='',
quoter=QuoteIfNecessary):
"""Write a variable definition that is a list of values.
E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
foo = blaha blahb
but in a pretty-printed style.
"""
values = ''
if value_list:
value_list = [quoter(prefix + l) for l in value_list]
values = ' \\\n\t' + ' \\\n\t'.join(value_list)
self.fp.write('%s :=%s\n\n' % (variable, values))
  def WriteDoCmd(self, outputs, inputs, command, part_of_all, comment=None,
                 postbuilds=False):
    """Write a Makefile rule that uses do_cmd.

    This makes the outputs dependent on the command line that was run,
    as well as support the V= make command line flag.

    outputs/inputs: file lists for the rule.
    command: do_cmd command name (e.g. 'copy', 'link').
    part_of_all: flag indicating this target is part of 'all'
    comment: optional comment written above the rule.
    postbuilds: if true, tell do_cmd to honor $POSTBUILDS.
    """
    suffix = ''
    if postbuilds:
      # A ',' in the command would break the extra do_cmd argument below.
      assert ',' not in command
      suffix = ',,1'  # Tell do_cmd to honor $POSTBUILDS
    self.WriteMakeRule(outputs, inputs,
                       actions = ['$(call do_cmd,%s%s)' % (command, suffix)],
                       comment = comment,
                       command = command,
                       force = True)
    # Add our outputs to the list of targets we read depfiles from.
    # all_deps is only used for deps file reading, and for deps files we replace
    # spaces with ? because escaping doesn't work with make's $(sort) and
    # other functions.
    outputs = [QuoteSpaces(o, SPACE_REPLACEMENT) for o in outputs]
    self.WriteLn('all_deps += %s' % ' '.join(outputs))
def WriteMakeRule(self, outputs, inputs, actions=None, comment=None,
order_only=False, force=False, phony=False, command=None):
"""Write a Makefile rule, with some extra tricks.
outputs: a list of outputs for the rule (note: this is not directly
supported by make; see comments below)
inputs: a list of inputs for the rule
actions: a list of shell commands to run for the rule
comment: a comment to put in the Makefile above the rule (also useful
for making this Python script's code self-documenting)
order_only: if true, makes the dependency order-only
force: if true, include FORCE_DO_CMD as an order-only dep
phony: if true, the rule does not actually generate the named output, the
output is just a name to run the rule
command: (optional) command name to generate unambiguous labels
"""
outputs = map(QuoteSpaces, outputs)
inputs = map(QuoteSpaces, inputs)
if comment:
self.WriteLn('# ' + comment)
if phony:
self.WriteLn('.PHONY: ' + ' '.join(outputs))
if actions:
self.WriteLn("%s: TOOLSET := $(TOOLSET)" % outputs[0])
force_append = ' FORCE_DO_CMD' if force else ''
if order_only:
# Order only rule: Just write a simple rule.
# TODO(evanm): just make order_only a list of deps instead of this hack.
self.WriteLn('%s: | %s%s' %
(' '.join(outputs), ' '.join(inputs), force_append))
elif len(outputs) == 1:
# Regular rule, one output: Just write a simple rule.
self.WriteLn('%s: %s%s' % (outputs[0], ' '.join(inputs), force_append))
else:
# Regular rule, more than one output: Multiple outputs are tricky in
# make. We will write three rules:
# - All outputs depend on an intermediate file.
# - Make .INTERMEDIATE depend on the intermediate.
# - The intermediate file depends on the inputs and executes the
# actual command.
# - The intermediate recipe will 'touch' the intermediate file.
# - The multi-output rule will have an do-nothing recipe.
intermediate = "%s.intermediate" % (command if command else self.target)
self.WriteLn('%s: %s' % (' '.join(outputs), intermediate))
self.WriteLn('\t%s' % '@:');
self.WriteLn('%s: %s' % ('.INTERMEDIATE', intermediate))
self.WriteLn('%s: %s%s' %
(intermediate, ' '.join(inputs), force_append))
actions.insert(0, '$(call do_cmd,touch)')
if actions:
for action in actions:
self.WriteLn('\t%s' % action)
self.WriteLn()
  def WriteAndroidNdkModuleRule(self, module_name, all_sources, link_deps):
    """Write a set of LOCAL_XXX definitions for Android NDK.

    These variable definitions will be used by Android NDK but do nothing for
    non-Android applications.

    Arguments:
      module_name: Android NDK module name, which must be unique among all
          module names.
      all_sources: A list of source files (will be filtered by Compilable).
      link_deps: A list of link dependencies, which must be sorted in
          the order from dependencies to dependents.
    """
    # Only binary-producing targets get NDK module definitions.
    if self.type not in ('executable', 'shared_library', 'static_library'):
      return

    self.WriteLn('# Variable definitions for Android applications')
    self.WriteLn('include $(CLEAR_VARS)')
    self.WriteLn('LOCAL_MODULE := ' + module_name)
    self.WriteLn('LOCAL_CFLAGS := $(CFLAGS_$(BUILDTYPE)) '
                 '$(DEFS_$(BUILDTYPE)) '
                 # LOCAL_CFLAGS is applied to both of C and C++.  There is
                 # no way to specify $(CFLAGS_C_$(BUILDTYPE)) only for C
                 # sources.
                 '$(CFLAGS_C_$(BUILDTYPE)) '
                 # $(INCS_$(BUILDTYPE)) includes the prefix '-I' while
                 # LOCAL_C_INCLUDES does not expect it.  So put it in
                 # LOCAL_CFLAGS.
                 '$(INCS_$(BUILDTYPE))')
    # LOCAL_CXXFLAGS is obsolete and LOCAL_CPPFLAGS is preferred.
    self.WriteLn('LOCAL_CPPFLAGS := $(CFLAGS_CC_$(BUILDTYPE))')
    self.WriteLn('LOCAL_C_INCLUDES :=')
    self.WriteLn('LOCAL_LDLIBS := $(LDFLAGS_$(BUILDTYPE)) $(LIBS)')

    # Detect the C++ extension: use whichever of .cc/.cpp/.cxx appears most
    # often in the source list ('.cpp' wins ties by being the default).
    cpp_ext = {'.cc': 0, '.cpp': 0, '.cxx': 0}
    default_cpp_ext = '.cpp'
    for filename in all_sources:
      ext = os.path.splitext(filename)[1]
      if ext in cpp_ext:
        cpp_ext[ext] += 1
        if cpp_ext[ext] > cpp_ext[default_cpp_ext]:
          default_cpp_ext = ext
    self.WriteLn('LOCAL_CPP_EXTENSION := ' + default_cpp_ext)

    self.WriteList(map(self.Absolutify, filter(Compilable, all_sources)),
                   'LOCAL_SRC_FILES')

    # Filter out those which do not match prefix and suffix and produce
    # the resulting list without prefix and suffix.
    def DepsToModules(deps, prefix, suffix):
      modules = []
      for filepath in deps:
        filename = os.path.basename(filepath)
        if filename.startswith(prefix) and filename.endswith(suffix):
          modules.append(filename[len(prefix):-len(suffix)])
      return modules

    # Retrieve the default value of 'SHARED_LIB_SUFFIX'
    params = {'flavor': 'linux'}
    default_variables = {}
    CalculateVariables(default_variables, params)

    self.WriteList(
        DepsToModules(link_deps,
                      generator_default_variables['SHARED_LIB_PREFIX'],
                      default_variables['SHARED_LIB_SUFFIX']),
        'LOCAL_SHARED_LIBRARIES')
    self.WriteList(
        DepsToModules(link_deps,
                      generator_default_variables['STATIC_LIB_PREFIX'],
                      generator_default_variables['STATIC_LIB_SUFFIX']),
        'LOCAL_STATIC_LIBRARIES')

    if self.type == 'executable':
      self.WriteLn('include $(BUILD_EXECUTABLE)')
    elif self.type == 'shared_library':
      self.WriteLn('include $(BUILD_SHARED_LIBRARY)')
    elif self.type == 'static_library':
      self.WriteLn('include $(BUILD_STATIC_LIBRARY)')
    self.WriteLn()
def WriteLn(self, text=''):
self.fp.write(text + '\n')
  def GetSortedXcodeEnv(self, additional_settings=None):
    """Return this target's Xcode environment variables as a sorted list.

    Delegates to gyp.xcode_emulation, expressing the build and source
    directories through the $(abs_builddir)/$(abs_srcdir) make variables so
    the values are resolved by make at build time.
    """
    return gyp.xcode_emulation.GetSortedXcodeEnv(
        self.xcode_settings, "$(abs_builddir)",
        os.path.join("$(abs_srcdir)", self.path), "$(BUILDTYPE)",
        additional_settings)
  def GetSortedXcodePostbuildEnv(self):
    """Return the Xcode env for postbuilds, always including
    CHROMIUM_STRIP_SAVE_FILE."""
    # CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
    # TODO(thakis): It would be nice to have some general mechanism instead.
    strip_save_file = self.xcode_settings.GetPerTargetSetting(
        'CHROMIUM_STRIP_SAVE_FILE', '')
    # Even if strip_save_file is empty, explicitly write it. Else a postbuild
    # might pick up an export from an earlier target.
    return self.GetSortedXcodeEnv(
        additional_settings={'CHROMIUM_STRIP_SAVE_FILE': strip_save_file})
def WriteSortedXcodeEnv(self, target, env):
for k, v in env:
# For
# foo := a\ b
# the escaped space does the right thing. For
# export foo := a\ b
# it does not -- the backslash is written to the env as literal character.
# So don't escape spaces in |env[k]|.
self.WriteLn('%s: export %s := %s' % (QuoteSpaces(target), k, v))
def Objectify(self, path):
"""Convert a path to its output directory form."""
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/' % self.toolset)
if not '$(obj)' in path:
path = '$(obj).%s/$(TARGET)/%s' % (self.toolset, path)
return path
def Pchify(self, path, lang):
"""Convert a prefix header path to its output directory form."""
path = self.Absolutify(path)
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/pch-%s' %
(self.toolset, lang))
return path
return '$(obj).%s/$(TARGET)/pch-%s/%s' % (self.toolset, lang, path)
def Absolutify(self, path):
"""Convert a subdirectory-relative path into a base-relative path.
Skips over paths that contain variables."""
if '$(' in path:
# Don't call normpath in this case, as it might collapse the
# path too aggressively if it features '..'. However it's still
# important to strip trailing slashes.
return path.rstrip('/')
return os.path.normpath(os.path.join(self.path, path))
def ExpandInputRoot(self, template, expansion, dirname):
if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
return template
path = template % {
'INPUT_ROOT': expansion,
'INPUT_DIRNAME': dirname,
}
return path
def _InstallableTargetInstallPath(self):
"""Returns the location of the final output for an installable target."""
# Xcode puts shared_library results into PRODUCT_DIR, and some gyp files
# rely on this. Emulate this behavior for mac.
# XXX(TooTallNate): disabling this code since we don't want this behavior...
#if (self.type == 'shared_library' and
# (self.flavor != 'mac' or self.toolset != 'target')):
# # Install all shared libs into a common directory (per toolset) for
# # convenient access with LD_LIBRARY_PATH.
# return '$(builddir)/lib.%s/%s' % (self.toolset, self.alias)
return '$(builddir)/' + self.alias
def WriteAutoRegenerationRule(params, root_makefile, makefile_name,
                              build_files):
  """Write the target to regenerate the Makefile.

  Emits a make rule that re-runs gyp (with the original command-line flags)
  whenever any of the input .gyp/.gypi files listed in |build_files| is newer
  than the generated |makefile_name|.
  """
  options = params['options']
  build_files_args = [gyp.common.RelativePath(filename, options.toplevel_dir)
                      for filename in params['build_files_arg']]
  gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'],
                                            options.toplevel_dir)
  # A bare relative name would be looked up on $PATH by the shell; prefix
  # './' so the regeneration command runs the same binary that generated us.
  if not gyp_binary.startswith(os.sep):
    gyp_binary = os.path.join('.', gyp_binary)
  root_makefile.write(
      "quiet_cmd_regen_makefile = ACTION Regenerating $@\n"
      "cmd_regen_makefile = cd $(srcdir); %(cmd)s\n"
      "%(makefile_name)s: %(deps)s\n"
      "\t$(call do_cmd,regen_makefile)\n\n" % {
          'makefile_name': makefile_name,
          'deps': ' '.join(map(Sourceify, build_files)),
          'cmd': gyp.common.EncodePOSIXShellList(
                     [gyp_binary, '-fmake'] +
                     gyp.RegenerateFlags(options) +
                     build_files_args)})
def PerformBuild(data, configurations, params):
  """Invoke 'make' once per configuration (gyp --build support).

  NOTE: uses a Python 2 print statement; this module targets Python 2.
  Raises subprocess.CalledProcessError if any make invocation fails.
  """
  options = params['options']
  for config in configurations:
    arguments = ['make']
    # Run make from the top-level directory when generating out-of-tree.
    if options.toplevel_dir and options.toplevel_dir != '.':
      arguments += '-C', options.toplevel_dir
    arguments.append('BUILDTYPE=' + config)
    print 'Building [%s]: %s' % (config, arguments)
    subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
  """Generator entry point: write the root Makefile, one .mk per target,
  and a sub-Makefile per .gyp file.

  Args:
    target_list: list of qualified target names to generate for.
    target_dicts: dict mapping qualified target name -> target spec.
    data: dict mapping build file path -> parsed gyp data.
    params: generator parameters (options, generator_flags, build_files, ...).
  """
  options = params['options']
  flavor = gyp.common.GetFlavor(params)
  generator_flags = params.get('generator_flags', {})
  builddir_name = generator_flags.get('output_dir', 'out')
  android_ndk_version = generator_flags.get('android_ndk_version', None)
  default_target = generator_flags.get('default_target', 'all')

  def CalculateMakefilePath(build_file, base_name):
    """Determine where to write a Makefile for a given gyp file."""
    # Paths in gyp files are relative to the .gyp file, but we want
    # paths relative to the source root for the master makefile. Grab
    # the path of the .gyp file as the base to relativize against.
    # E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.depth)
    # We write the file in the base_path directory.
    output_file = os.path.join(options.depth, base_path, base_name)
    if options.generator_output:
      output_file = os.path.join(
          options.depth, options.generator_output, base_path, base_name)
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.toplevel_dir)
    return base_path, output_file

  # TODO: search for the first non-'Default' target.  This can go
  # away when we add verification that all targets have the
  # necessary configurations.
  default_configuration = None
  toolsets = set([target_dicts[target]['toolset'] for target in target_list])
  for target in target_list:
    spec = target_dicts[target]
    if spec['default_configuration'] != 'Default':
      default_configuration = spec['default_configuration']
      break
  if not default_configuration:
    default_configuration = 'Default'

  srcdir = '.'
  makefile_name = 'Makefile' + options.suffix
  makefile_path = os.path.join(options.toplevel_dir, makefile_name)
  if options.generator_output:
    global srcdir_prefix
    makefile_path = os.path.join(
        options.toplevel_dir, options.generator_output, makefile_name)
    srcdir = gyp.common.RelativePath(srcdir, options.generator_output)
    srcdir_prefix = '$(srcdir)/'

  # Fill in the parameters interpolated into SHARED_HEADER, starting from the
  # Linux defaults and overriding per-flavor below.
  flock_command= 'flock'
  copy_archive_arguments = '-af'
  header_params = {
      'default_target': default_target,
      'builddir': builddir_name,
      'default_configuration': default_configuration,
      'flock': flock_command,
      'flock_index': 1,
      'link_commands': LINK_COMMANDS_LINUX,
      'extra_commands': '',
      'srcdir': srcdir,
      'copy_archive_args': copy_archive_arguments,
    }
  if flavor == 'mac':
    flock_command = './gyp-mac-tool flock'
    header_params.update({
        'flock': flock_command,
        'flock_index': 2,
        'link_commands': LINK_COMMANDS_MAC,
        'extra_commands': SHARED_HEADER_MAC_COMMANDS,
    })
  elif flavor == 'android':
    header_params.update({
        'link_commands': LINK_COMMANDS_ANDROID,
    })
  elif flavor == 'solaris':
    header_params.update({
        'flock': './gyp-flock-tool flock',
        'flock_index': 2,
    })
  elif flavor == 'freebsd':
    # Note: OpenBSD has sysutils/flock. lockf seems to be FreeBSD specific.
    header_params.update({
        'flock': 'lockf',
    })
  elif flavor == 'openbsd':
    copy_archive_arguments = '-pPRf'
    header_params.update({
        'copy_archive_args': copy_archive_arguments,
    })
  elif flavor == 'aix':
    copy_archive_arguments = '-pPRf'
    header_params.update({
        'copy_archive_args': copy_archive_arguments,
        'link_commands': LINK_COMMANDS_AIX,
        'flock': './gyp-flock-tool flock',
        'flock_index': 2,
    })

  # Toolchain command names: environment variables win over make defaults.
  header_params.update({
      'CC.target': GetEnvironFallback(('CC_target', 'CC'), '$(CC)'),
      'AR.target': GetEnvironFallback(('AR_target', 'AR'), '$(AR)'),
      'CXX.target': GetEnvironFallback(('CXX_target', 'CXX'), '$(CXX)'),
      'LINK.target': GetEnvironFallback(('LINK_target', 'LINK'), '$(LINK)'),
      'CC.host': GetEnvironFallback(('CC_host',), 'gcc'),
      'AR.host': GetEnvironFallback(('AR_host',), 'ar'),
      'CXX.host': GetEnvironFallback(('CXX_host',), 'g++'),
      'LINK.host': GetEnvironFallback(('LINK_host',), '$(CXX.host)'),
  })

  # Translate the gyp-level 'make_global_settings' (and any *_wrapper
  # entries) into make variable assignments for the root Makefile.
  build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
  make_global_settings_array = data[build_file].get('make_global_settings', [])
  wrappers = {}
  for key, value in make_global_settings_array:
    if key.endswith('_wrapper'):
      wrappers[key[:-len('_wrapper')]] = '$(abspath %s)' % value
  make_global_settings = ''
  for key, value in make_global_settings_array:
    if re.match('.*_wrapper', key):
      continue
    if value[0] != '$':
      value = '$(abspath %s)' % value
    wrapper = wrappers.get(key)
    if wrapper:
      value = '%s %s' % (wrapper, value)
      del wrappers[key]
    if key in ('CC', 'CC.host', 'CXX', 'CXX.host'):
      make_global_settings += (
          'ifneq (,$(filter $(origin %s), undefined default))\n' % key)
      # Let gyp-time envvars win over global settings.
      env_key = key.replace('.', '_')  # CC.host -> CC_host
      if env_key in os.environ:
        value = os.environ[env_key]
      make_global_settings += '  %s = %s\n' % (key, value)
      make_global_settings += 'endif\n'
    else:
      make_global_settings += '%s ?= %s\n' % (key, value)
  # TODO(ukai): define cmd when only wrapper is specified in
  # make_global_settings.
  header_params['make_global_settings'] = make_global_settings

  gyp.common.EnsureDirExists(makefile_path)
  root_makefile = open(makefile_path, 'w')
  root_makefile.write(SHARED_HEADER % header_params)
  # Currently any versions have the same effect, but in future the behavior
  # could be different.
  if android_ndk_version:
    root_makefile.write(
        '# Define LOCAL_PATH for build of Android applications.\n'
        'LOCAL_PATH := $(call my-dir)\n'
        '\n')
  for toolset in toolsets:
    root_makefile.write('TOOLSET := %s\n' % toolset)
    WriteRootHeaderSuffixRules(root_makefile)

  # Put build-time support tools next to the root Makefile.
  dest_path = os.path.dirname(makefile_path)
  gyp.common.CopyTool(flavor, dest_path)

  # Find the list of targets that derive from the gyp file(s) being built.
  needed_targets = set()
  for build_file in params['build_files']:
    for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
      needed_targets.add(target)

  build_files = set()
  include_list = set()
  for qualified_target in target_list:
    build_file, target, toolset = gyp.common.ParseQualifiedTarget(
        qualified_target)

    this_make_global_settings = data[build_file].get('make_global_settings', [])
    assert make_global_settings_array == this_make_global_settings, (
        "make_global_settings needs to be the same for all targets. %s vs. %s" %
        (this_make_global_settings, make_global_settings))

    build_files.add(gyp.common.RelativePath(build_file, options.toplevel_dir))
    included_files = data[build_file]['included_files']
    for included_file in included_files:
      # The included_files entries are relative to the dir of the build file
      # that included them, so we have to undo that and then make them relative
      # to the root dir.
      relative_include_file = gyp.common.RelativePath(
          gyp.common.UnrelativePath(included_file, build_file),
          options.toplevel_dir)
      abs_include_file = os.path.abspath(relative_include_file)
      # If the include file is from the ~/.gyp dir, we should use absolute path
      # so that relocating the src dir doesn't break the path.
      if (params['home_dot_gyp'] and
          abs_include_file.startswith(params['home_dot_gyp'])):
        build_files.add(abs_include_file)
      else:
        build_files.add(relative_include_file)

    base_path, output_file = CalculateMakefilePath(build_file,
        target + '.' + toolset + options.suffix + '.mk')

    spec = target_dicts[qualified_target]
    configs = spec['configurations']

    if flavor == 'mac':
      gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)

    writer = MakefileWriter(generator_flags, flavor)
    writer.Write(qualified_target, base_path, output_file, spec, configs,
                 part_of_all=qualified_target in needed_targets)

    # Our root_makefile lives at the source root.  Compute the relative path
    # from there to the output_file for including.
    mkfile_rel_path = gyp.common.RelativePath(output_file,
                                              os.path.dirname(makefile_path))
    include_list.add(mkfile_rel_path)

  # Write out per-gyp (sub-project) Makefiles.
  depth_rel_path = gyp.common.RelativePath(options.depth, os.getcwd())
  for build_file in build_files:
    # The paths in build_files were relativized above, so undo that before
    # testing against the non-relativized items in target_list and before
    # calculating the Makefile path.
    build_file = os.path.join(depth_rel_path, build_file)
    gyp_targets = [target_dicts[target]['target_name'] for target in target_list
                   if target.startswith(build_file) and
                   target in needed_targets]
    # Only generate Makefiles for gyp files with targets.
    if not gyp_targets:
      continue
    base_path, output_file = CalculateMakefilePath(build_file,
        os.path.splitext(os.path.basename(build_file))[0] + '.Makefile')
    makefile_rel_path = gyp.common.RelativePath(os.path.dirname(makefile_path),
                                                os.path.dirname(output_file))
    # NOTE(review): 'writer' here is the MakefileWriter left over from the
    # last iteration of the target loop above -- presumably WriteSubMake is
    # instance-state-free; confirm before refactoring.
    writer.WriteSubMake(output_file, makefile_rel_path, gyp_targets,
                        builddir_name)

  # Write out the sorted list of includes.
  root_makefile.write('\n')
  for include_file in sorted(include_list):
    # We wrap each .mk include in an if statement so users can tell make to
    # not load a file by setting NO_LOAD.  The below make code says, only
    # load the .mk file if the .mk filename doesn't start with a token in
    # NO_LOAD.
    root_makefile.write(
        "ifeq ($(strip $(foreach prefix,$(NO_LOAD),\\\n"
        "    $(findstring $(join ^,$(prefix)),\\\n"
        "                 $(join ^," + include_file + ")))),)\n")
    root_makefile.write("  include " + include_file + "\n")
    root_makefile.write("endif\n")
  root_makefile.write('\n')

  if (not generator_flags.get('standalone')
      and generator_flags.get('auto_regeneration', True)):
    WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files)

  root_makefile.write(SHARED_FOOTER)

  root_makefile.close()
| mit |
jtyr/ansible | lib/ansible/module_utils/facts/packages.py | 51 | 2601 | # (c) 2018, Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from abc import ABCMeta, abstractmethod
from ansible.module_utils.six import with_metaclass
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common._utils import get_all_subclasses
def get_all_pkg_managers():
    """Map lowercase class name -> class for every concrete PkgMgr subclass.

    The LibMgr/CLIMgr intermediate base classes are excluded; only real
    package-manager implementations are returned.
    """
    managers = {}
    for subclass in get_all_subclasses(PkgMgr):
        if subclass not in (CLIMgr, LibMgr):
            managers[subclass.__name__.lower()] = subclass
    return managers
class PkgMgr(with_metaclass(ABCMeta, object)):
    """Abstract base for package-manager fact collectors."""

    @abstractmethod
    def is_available(self):
        # Subclasses report whether the package manager is installed/usable
        # right now; detection may also perform any setup needed in the
        # process (importing libraries, locating binaries).
        pass

    @abstractmethod
    def list_installed(self):
        # Subclasses return an iterable of raw package records; each record
        # is later passed to get_package_details().
        pass

    @abstractmethod
    def get_package_details(self, package):
        # Subclasses turn one raw record into a dict of package information;
        # 'name' and 'version' are the minimal expected keys.
        pass

    def get_packages(self):
        """Collect every installed package as {name: [detail_dict, ...]}.

        A package name maps to a list because several versions of the same
        package can be installed side by side.
        """
        installed_packages = {}
        for package in self.list_installed():
            details = self.get_package_details(package)
            # Tag each entry with the manager that reported it, unless the
            # subclass already supplied a 'source'.
            details.setdefault('source', self.__class__.__name__.lower())
            installed_packages.setdefault(details['name'], []).append(details)
        return installed_packages
class LibMgr(PkgMgr):
    """Package manager backed by an importable Python library."""

    # Name of the python module that exposes this package manager's data.
    LIB = None

    def __init__(self):
        self._lib = None
        super(LibMgr, self).__init__()

    def is_available(self):
        """Return True when the backing python library can be imported."""
        try:
            self._lib = __import__(self.LIB)
        except ImportError:
            return False
        return True
class CLIMgr(PkgMgr):
    """Package manager backed by a command-line tool."""

    # Name of the command-line binary that exposes this package manager.
    CLI = None

    def __init__(self):
        self._cli = None
        super(CLIMgr, self).__init__()

    def is_available(self):
        """Return True when the backing CLI binary can be located on PATH."""
        try:
            self._cli = get_bin_path(self.CLI)
            return True
        except ValueError:
            return False
| gpl-3.0 |
40223134/0512 | static/Brython3.1.1-20150328-091302/Lib/keyword.py | 761 | 2049 | #! /usr/bin/env python3
"""Keywords (from "graminit.c")
This file is automatically generated; please don't muck it up!
To update the symbols in this file, 'cd' to the top directory of
the python source tree after building the interpreter and run:
./python Lib/keyword.py
"""
__all__ = ["iskeyword", "kwlist"]

# The region between the start/end markers is rewritten by main() below when
# regenerating this file from Python/graminit.c -- do not edit it by hand.
kwlist = [
#--start keywords--
        'False',
        'None',
        'True',
        'and',
        'as',
        'assert',
        'break',
        'class',
        'continue',
        'def',
        'del',
        'elif',
        'else',
        'except',
        'finally',
        'for',
        'from',
        'global',
        'if',
        'import',
        'in',
        'is',
        'lambda',
        'nonlocal',
        'not',
        'or',
        'pass',
        'raise',
        'return',
        'try',
        'while',
        'with',
        'yield',
#--end keywords--
        ]

# Bind a frozenset's membership test directly: iskeyword(s) is an O(1)
# hash lookup.
iskeyword = frozenset(kwlist).__contains__
def main():
    """Regenerate the kwlist block of Lib/keyword.py from Python/graminit.c.

    Usage: keyword.py [graminit_path [keyword_py_path]].  Exits with status 1
    if the target file does not contain the #--start/--end keywords-- markers.
    """
    import sys, re

    args = sys.argv[1:]
    iptfile = args and args[0] or "Python/graminit.c"
    if len(args) > 1: optfile = args[1]
    else: optfile = "Lib/keyword.py"

    # scan the source file for keywords
    with open(iptfile) as fp:
        strprog = re.compile('"([^"]+)"')
        lines = []
        for line in fp:
            # Lines of the form '{1, "kw"}' declare a keyword in graminit.c.
            if '{1, "' in line:
                match = strprog.search(line)
                if match:
                    lines.append("        '" + match.group(1) + "',\n")
    lines.sort()

    # load the output skeleton from the target
    with open(optfile) as fp:
        format = fp.readlines()

    # insert the lines of keywords between the marker comments
    try:
        start = format.index("#--start keywords--\n") + 1
        end = format.index("#--end keywords--\n")
        format[start:end] = lines
    except ValueError:
        sys.stderr.write("target does not contain format markers\n")
        sys.exit(1)

    # Write the output file; a context manager guarantees the handle is
    # closed (and buffers flushed) even if the write fails, matching the
    # 'with open' style used for the reads above.
    with open(optfile, 'w') as fp:
        fp.write(''.join(format))
if __name__ == "__main__":
    main()  # regenerate Lib/keyword.py in place when run as a script
| gpl-3.0 |
Jamesjue/linux_kernel_db | tools/perf/util/setup.py | 242 | 1531 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    # Redirect distutils' extension build output into the directories chosen
    # by the perf Makefile (PYTHON_EXTBUILD_LIB / PYTHON_EXTBUILD_TMP)
    # instead of the default ./build tree.
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    # Install from the same out-of-tree build directory that build_ext
    # writes into (PYTHON_EXTBUILD_LIB).
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib
# Compile flags are inherited from the perf Makefile via $CFLAGS.
cflags = getenv('CFLAGS', '').split()
# switch off several checks (need to be at the end of cflags list)
cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]

# Out-of-tree build/install locations and prebuilt static libs, all supplied
# by the perf Makefile through the environment.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
liblk = getenv('LIBLK')

# NOTE: file() is the Python 2 builtin (removed in Python 3); this script
# targets Python 2, as the shebang indicates.  Comment lines and blanks in
# util/python-ext-sources are skipped.
ext_sources = [f.strip() for f in file('util/python-ext-sources')
                if len(f.strip()) > 0 and f[0] != '#']

perf = Extension('perf',
		  sources = ext_sources,
		  include_dirs = ['util/include'],
		  extra_compile_args = cflags,
		  extra_objects = [libtraceevent, liblk],
                 )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
donniexyz/calligra | 3rdparty/google-breakpad/src/third_party/protobuf/protobuf/python/google/protobuf/internal/decoder.py | 261 | 25883 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Code for decoding protocol buffer primitives.
This code is very similar to encoder.py -- read the docs for that module first.
A "decoder" is a function with the signature:
Decode(buffer, pos, end, message, field_dict)
The arguments are:
buffer: The string containing the encoded message.
pos: The current position in the string.
end: The position in the string where the current message ends. May be
less than len(buffer) if we're reading a sub-message.
message: The message object into which we're parsing.
field_dict: message._fields (avoids a hashtable lookup).
The decoder reads the field and stores it into field_dict, returning the new
buffer position. A decoder for a repeated field may proactively decode all of
the elements of that field, if they appear consecutively.
Note that decoders may throw any of the following:
IndexError: Indicates a truncated message.
struct.error: Unpacking of a fixed-width field failed.
message.DecodeError: Other errors.
Decoders are expected to raise an exception if they are called with pos > end.
This allows callers to be lax about bounds checking: it's fine to read past
"end" as long as you are sure that someone else will notice and throw an
exception later on.
Something up the call stack is expected to catch IndexError and struct.error
and convert them to message.DecodeError.
Decoders are constructed using decoder constructors with the signature:
MakeDecoder(field_number, is_repeated, is_packed, key, new_default)
The arguments are:
field_number: The field number of the field we want to decode.
is_repeated: Is the field a repeated field? (bool)
is_packed: Is the field a packed field? (bool)
key: The key to use when looking up the field within field_dict.
(This is actually the FieldDescriptor but nothing in this
file should depend on that.)
new_default: A function which takes a message object as a parameter and
returns a new instance of the default value for this field.
(This is called for repeated fields and sub-messages, when an
instance does not already exist.)
As with encoders, we define a decoder constructor for every type of field.
Then, for every field of every message class we construct an actual decoder.
That decoder goes into a dict indexed by tag, so when we decode a message
we repeatedly read a tag, look up the corresponding decoder, and invoke it.
"""
__author__ = 'kenton@google.com (Kenton Varda)'
import struct
from google.protobuf.internal import encoder
from google.protobuf.internal import wire_format
from google.protobuf import message
# This will overflow and thus become IEEE-754 "infinity".  We would use
# "float('inf')" but it doesn't work on Windows pre-Python-2.6.
_POS_INF = 1e10000
_NEG_INF = -_POS_INF
# Infinity times zero yields NaN under IEEE-754 semantics.
_NAN = _POS_INF * 0


# This is not for optimization, but rather to avoid conflicts with local
# variables named "message".
_DecodeError = message.DecodeError
def _VarintDecoder(mask):
"""Return an encoder for a basic varint value (does not include tag).
Decoded values will be bitwise-anded with the given mask before being
returned, e.g. to limit them to 32 bits. The returned decoder does not
take the usual "end" parameter -- the caller is expected to do bounds checking
after the fact (often the caller can defer such checking until later). The
decoder returns a (value, new_pos) pair.
"""
local_ord = ord
def DecodeVarint(buffer, pos):
result = 0
shift = 0
while 1:
b = local_ord(buffer[pos])
result |= ((b & 0x7f) << shift)
pos += 1
if not (b & 0x80):
result &= mask
return (result, pos)
shift += 7
if shift >= 64:
raise _DecodeError('Too many bytes when decoding varint.')
return DecodeVarint
def _SignedVarintDecoder(mask):
"""Like _VarintDecoder() but decodes signed values."""
local_ord = ord
def DecodeVarint(buffer, pos):
result = 0
shift = 0
while 1:
b = local_ord(buffer[pos])
result |= ((b & 0x7f) << shift)
pos += 1
if not (b & 0x80):
if result > 0x7fffffffffffffff:
result -= (1 << 64)
result |= ~mask
else:
result &= mask
return (result, pos)
shift += 7
if shift >= 64:
raise _DecodeError('Too many bytes when decoding varint.')
return DecodeVarint
# 64-bit decoders (the default varint wire width).
_DecodeVarint = _VarintDecoder((1 << 64) - 1)
_DecodeSignedVarint = _SignedVarintDecoder((1 << 64) - 1)

# Use these versions for values which must be limited to 32 bits.
_DecodeVarint32 = _VarintDecoder((1 << 32) - 1)
_DecodeSignedVarint32 = _SignedVarintDecoder((1 << 32) - 1)
def ReadTag(buffer, pos):
  """Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple.

  The tag's raw bytes are returned undecoded: callers use them directly as a
  key to look up the matching decoder, trading a pure-Python varint decode
  for a C-level byte-string hash lookup.
  """

  end = pos
  # A varint continues while the high bit is set; the tag ends one byte
  # after the first byte that lacks it.
  while ord(buffer[end]) & 0x80:
    end += 1
  end += 1
  return (buffer[pos:end], end)
# --------------------------------------------------------------------
def _SimpleDecoder(wire_type, decode_value):
  """Return a constructor for a decoder for fields of a particular type.

  Args:
      wire_type:  The field's wire type.
      decode_value:  A function which decodes an individual value, e.g.
        _DecodeVarint()
  """

  def SpecificDecoder(field_number, is_repeated, is_packed, key, new_default):
    if is_packed:
      local_DecodeVarint = _DecodeVarint
      def DecodePackedField(buffer, pos, end, message, field_dict):
        # Packed field: a single length-delimited blob of consecutive values.
        value = field_dict.get(key)
        if value is None:
          value = field_dict.setdefault(key, new_default(message))
        (endpoint, pos) = local_DecodeVarint(buffer, pos)
        endpoint += pos
        if endpoint > end:
          raise _DecodeError('Truncated message.')
        while pos < endpoint:
          (element, pos) = decode_value(buffer, pos)
          value.append(element)
        if pos > endpoint:
          # The last element read past the declared length.
          del value[-1]   # Discard corrupt value.
          raise _DecodeError('Packed element was truncated.')
        return pos
      return DecodePackedField
    elif is_repeated:
      tag_bytes = encoder.TagBytes(field_number, wire_type)
      tag_len = len(tag_bytes)
      def DecodeRepeatedField(buffer, pos, end, message, field_dict):
        value = field_dict.get(key)
        if value is None:
          value = field_dict.setdefault(key, new_default(message))
        while 1:
          (element, new_pos) = decode_value(buffer, pos)
          value.append(element)
          # Predict that the next tag is another copy of the same repeated
          # field.
          pos = new_pos + tag_len
          if buffer[new_pos:pos] != tag_bytes or new_pos >= end:
            # Prediction failed.  Return.
            if new_pos > end:
              raise _DecodeError('Truncated message.')
            return new_pos
      return DecodeRepeatedField
    else:
      def DecodeField(buffer, pos, end, message, field_dict):
        # Singular field: decode one value straight into field_dict.
        (field_dict[key], pos) = decode_value(buffer, pos)
        if pos > end:
          del field_dict[key]  # Discard corrupt value.
          raise _DecodeError('Truncated message.')
        return pos
      return DecodeField

  return SpecificDecoder
def _ModifiedDecoder(wire_type, decode_value, modify_value):
  """Like _SimpleDecoder() but runs every decoded value through
  |modify_value| (usually ZigZagDecode) before it is stored."""

  # Delegating to _SimpleDecoder is marginally slower than duplicating its
  # body here, but not by enough to matter.
  def InnerDecode(buffer, pos):
    raw, new_pos = decode_value(buffer, pos)
    return modify_value(raw), new_pos
  return _SimpleDecoder(wire_type, InnerDecode)
def _StructPackDecoder(wire_type, format):
  """Return a constructor for a decoder for a fixed-width field.

  Args:
      wire_type:  The field's wire type.
      format:  The format string to pass to struct.unpack().
  """

  value_size = struct.calcsize(format)
  local_unpack = struct.unpack

  # Note that we expect someone up-stack to catch struct.error and convert
  # it to _DecodeError -- this way we don't have to set up exception-
  # handling blocks every time we parse one value.
  def InnerDecode(buffer, pos):
    end = pos + value_size
    return (local_unpack(format, buffer[pos:end])[0], end)
  return _SimpleDecoder(wire_type, InnerDecode)
def _FloatDecoder():
  """Returns a decoder for a float field.

  This code works around a bug in struct.unpack for non-finite 32-bit
  floating-point values.  NOTE: the byte-wise comparisons below index into a
  str buffer and compare single characters, so this is Python 2 code.
  """

  local_unpack = struct.unpack

  def InnerDecode(buffer, pos):
    # We expect a 32-bit value in little-endian byte order.  Bit 1 is the sign
    # bit, bits 2-9 represent the exponent, and bits 10-32 are the significand.
    new_pos = pos + 4
    float_bytes = buffer[pos:new_pos]

    # If this value has all its exponent bits set, then it's non-finite.
    # In Python 2.4, struct.unpack will convert it to a finite 64-bit value.
    # To avoid that, we parse it specially.
    if ((float_bytes[3] in '\x7F\xFF')
        and (float_bytes[2] >= '\x80')):
      # If at least one significand bit is set...
      if float_bytes[0:3] != '\x00\x00\x80':
        return (_NAN, new_pos)
      # If sign bit is set...
      if float_bytes[3] == '\xFF':
        return (_NEG_INF, new_pos)
      return (_POS_INF, new_pos)

    # Note that we expect someone up-stack to catch struct.error and convert
    # it to _DecodeError -- this way we don't have to set up exception-
    # handling blocks every time we parse one value.
    result = local_unpack('<f', float_bytes)[0]
    return (result, new_pos)
  return _SimpleDecoder(wire_format.WIRETYPE_FIXED32, InnerDecode)
def _DoubleDecoder():
  """Returns a decoder for a double field.

  This code works around a bug in struct.unpack for not-a-number.  NOTE: the
  byte-wise comparisons below index into a str buffer and compare single
  characters, so this is Python 2 code.
  """

  local_unpack = struct.unpack

  def InnerDecode(buffer, pos):
    # We expect a 64-bit value in little-endian byte order.  Bit 1 is the sign
    # bit, bits 2-12 represent the exponent, and bits 13-64 are the significand.
    new_pos = pos + 8
    double_bytes = buffer[pos:new_pos]

    # If this value has all its exponent bits set and at least one significand
    # bit set, it's not a number.  In Python 2.4, struct.unpack will treat it
    # as inf or -inf.  To avoid that, we treat it specially.
    if ((double_bytes[7] in '\x7F\xFF')
        and (double_bytes[6] >= '\xF0')
        and (double_bytes[0:7] != '\x00\x00\x00\x00\x00\x00\xF0')):
      return (_NAN, new_pos)

    # Note that we expect someone up-stack to catch struct.error and convert
    # it to _DecodeError -- this way we don't have to set up exception-
    # handling blocks every time we parse one value.
    result = local_unpack('<d', double_bytes)[0]
    return (result, new_pos)
  return _SimpleDecoder(wire_format.WIRETYPE_FIXED64, InnerDecode)
# --------------------------------------------------------------------
# Concrete decoder constructors, one per scalar protobuf field type.
Int32Decoder = EnumDecoder = _SimpleDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeSignedVarint32)

Int64Decoder = _SimpleDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeSignedVarint)

UInt32Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint32)
UInt64Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint)

# sint32/sint64 use ZigZag encoding on the wire.
SInt32Decoder = _ModifiedDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeVarint32, wire_format.ZigZagDecode)
SInt64Decoder = _ModifiedDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeVarint, wire_format.ZigZagDecode)

# Note that Python conveniently guarantees that when using the '<' prefix on
# formats, they will also have the same size across all platforms (as opposed
# to without the prefix, where their sizes depend on the C compiler's basic
# type sizes).
Fixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<I')
Fixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<Q')
SFixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<i')
SFixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<q')
FloatDecoder = _FloatDecoder()
DoubleDecoder = _DoubleDecoder()

BoolDecoder = _ModifiedDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeVarint, bool)
def StringDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for a string field.

  The returned callable has the standard field-decoder signature
  (buffer, pos, end, message, field_dict) and returns the new buffer
  position after the decoded value(s).
  """
  local_DecodeVarint = _DecodeVarint
  local_unicode = unicode  # Python 2 builtin; values are stored as unicode.
  assert not is_packed
  if is_repeated:
    tag_bytes = encoder.TagBytes(field_number,
                                 wire_format.WIRETYPE_LENGTH_DELIMITED)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      # Lazily create the repeated-field container on first use.
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while 1:
        (size, pos) = local_DecodeVarint(buffer, pos)
        new_pos = pos + size
        if new_pos > end:
          raise _DecodeError('Truncated string.')
        value.append(local_unicode(buffer[pos:new_pos], 'utf-8'))
        # Predict that the next tag is another copy of the same repeated field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
          # Prediction failed.  Return.
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      (size, pos) = local_DecodeVarint(buffer, pos)
      new_pos = pos + size
      if new_pos > end:
        raise _DecodeError('Truncated string.')
      field_dict[key] = local_unicode(buffer[pos:new_pos], 'utf-8')
      return new_pos
    return DecodeField
def BytesDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for a bytes field.

  Identical in structure to StringDecoder, except values are stored as raw
  buffer slices without UTF-8 decoding.
  """
  local_DecodeVarint = _DecodeVarint
  assert not is_packed
  if is_repeated:
    tag_bytes = encoder.TagBytes(field_number,
                                 wire_format.WIRETYPE_LENGTH_DELIMITED)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      # Lazily create the repeated-field container on first use.
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while 1:
        (size, pos) = local_DecodeVarint(buffer, pos)
        new_pos = pos + size
        if new_pos > end:
          raise _DecodeError('Truncated string.')
        value.append(buffer[pos:new_pos])
        # Predict that the next tag is another copy of the same repeated field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
          # Prediction failed.  Return.
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      (size, pos) = local_DecodeVarint(buffer, pos)
      new_pos = pos + size
      if new_pos > end:
        raise _DecodeError('Truncated string.')
      field_dict[key] = buffer[pos:new_pos]
      return new_pos
    return DecodeField
def GroupDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for a group field.

  Args:
    field_number: The group field's number; used to build the start/end tags.
    is_repeated: True if the field is 'repeated'.
    is_packed: Must be False; groups cannot be packed.
    key: The object used to index this field in the message's field_dict.
    new_default: Callable producing a fresh default value for the field.
  """
  end_tag_bytes = encoder.TagBytes(field_number,
                                   wire_format.WIRETYPE_END_GROUP)
  end_tag_len = len(end_tag_bytes)

  assert not is_packed
  if is_repeated:
    tag_bytes = encoder.TagBytes(field_number,
                                 wire_format.WIRETYPE_START_GROUP)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      # Fetch (or lazily create) the repeated-field container once; nothing
      # in the loop below rebinds field_dict[key], so re-fetching it on every
      # iteration (as the old code did) was redundant work.
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while 1:
        # Read sub-message.
        pos = value.add()._InternalParse(buffer, pos, end)
        # Read end tag.
        new_pos = pos+end_tag_len
        if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
          raise _DecodeError('Missing group end tag.')
        # Predict that the next tag is another copy of the same repeated field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
          # Prediction failed.  Return.
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      # Read sub-message.
      pos = value._InternalParse(buffer, pos, end)
      # Read end tag.
      new_pos = pos+end_tag_len
      if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
        raise _DecodeError('Missing group end tag.')
      return new_pos
    return DecodeField
def MessageDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for a message field.

  Args:
    field_number: The message field's number.
    is_repeated: True if the field is 'repeated'.
    is_packed: Must be False; message fields cannot be packed.
    key: The object used to index this field in the message's field_dict.
    new_default: Callable producing a fresh default value for the field.
  """
  local_DecodeVarint = _DecodeVarint

  assert not is_packed
  if is_repeated:
    tag_bytes = encoder.TagBytes(field_number,
                                 wire_format.WIRETYPE_LENGTH_DELIMITED)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      # Fetch (or lazily create) the repeated-field container once; nothing
      # in the loop below rebinds field_dict[key], so re-fetching it on every
      # iteration (as the old code did) was redundant work.
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while 1:
        # Read length.
        (size, pos) = local_DecodeVarint(buffer, pos)
        new_pos = pos + size
        if new_pos > end:
          raise _DecodeError('Truncated message.')
        # Read sub-message.
        if value.add()._InternalParse(buffer, pos, new_pos) != new_pos:
          # The only reason _InternalParse would return early is if it
          # encountered an end-group tag.
          raise _DecodeError('Unexpected end-group tag.')
        # Predict that the next tag is another copy of the same repeated field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
          # Prediction failed.  Return.
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      # Read length.
      (size, pos) = local_DecodeVarint(buffer, pos)
      new_pos = pos + size
      if new_pos > end:
        raise _DecodeError('Truncated message.')
      # Read sub-message.
      if value._InternalParse(buffer, pos, new_pos) != new_pos:
        # The only reason _InternalParse would return early is if it encountered
        # an end-group tag.
        raise _DecodeError('Unexpected end-group tag.')
      return new_pos
    return DecodeField
# --------------------------------------------------------------------

# Wire tag that opens a MessageSet item: field number 1, START_GROUP.
MESSAGE_SET_ITEM_TAG = encoder.TagBytes(1, wire_format.WIRETYPE_START_GROUP)
def MessageSetItemDecoder(extensions_by_number):
  """Returns a decoder for a MessageSet item.

  The parameter is the _extensions_by_number map for the message class.

  The message set message looks like this:
    message MessageSet {
      repeated group Item = 1 {
        required int32 type_id = 2;
        required string message = 3;
      }
    }
  """

  type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT)
  message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)
  item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP)

  # Bind frequently-used globals to locals for faster lookup in DecodeItem.
  local_ReadTag = ReadTag
  local_DecodeVarint = _DecodeVarint
  local_SkipField = SkipField

  def DecodeItem(buffer, pos, end, message, field_dict):
    type_id = -1
    message_start = -1
    message_end = -1

    # Technically, type_id and message can appear in any order, so we need
    # a little loop here.
    while 1:
      (tag_bytes, pos) = local_ReadTag(buffer, pos)
      if tag_bytes == type_id_tag_bytes:
        (type_id, pos) = local_DecodeVarint(buffer, pos)
      elif tag_bytes == message_tag_bytes:
        (size, message_start) = local_DecodeVarint(buffer, pos)
        pos = message_end = message_start + size
      elif tag_bytes == item_end_tag_bytes:
        break
      else:
        # Fixed: this previously looked up the global SkipField, defeating
        # the local_SkipField alias bound above (behavior is unchanged).
        pos = local_SkipField(buffer, pos, end, tag_bytes)
        if pos == -1:
          raise _DecodeError('Missing group end tag.')

    if pos > end:
      raise _DecodeError('Truncated message.')

    if type_id == -1:
      raise _DecodeError('MessageSet item missing type_id.')
    if message_start == -1:
      raise _DecodeError('MessageSet item missing message.')

    extension = extensions_by_number.get(type_id)
    if extension is not None:
      value = field_dict.get(extension)
      if value is None:
        value = field_dict.setdefault(
            extension, extension.message_type._concrete_class())
      if value._InternalParse(buffer, message_start, message_end) != message_end:
        # The only reason _InternalParse would return early is if it encountered
        # an end-group tag.
        raise _DecodeError('Unexpected end-group tag.')

    return pos

  return DecodeItem
# --------------------------------------------------------------------
# Optimization is not as heavy here because calls to SkipField() are rare,
# except for handling end-group tags.
def _SkipVarint(buffer, pos, end):
"""Skip a varint value. Returns the new position."""
while ord(buffer[pos]) & 0x80:
pos += 1
pos += 1
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _SkipFixed64(buffer, pos, end):
"""Skip a fixed64 value. Returns the new position."""
pos += 8
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _SkipLengthDelimited(buffer, pos, end):
  """Advance past a length-delimited value and return the new position."""
  # The value is prefixed by a varint giving its byte length.
  size, pos = _DecodeVarint(buffer, pos)
  new_pos = pos + size
  if new_pos > end:
    raise _DecodeError('Truncated message.')
  return new_pos
def _SkipGroup(buffer, pos, end):
  """Advance past a nested group and return the new position."""
  # Keep skipping fields until SkipField() reports the matching end-group
  # tag by returning -1; at that point pos already sits just past the tag.
  while True:
    tag_bytes, pos = ReadTag(buffer, pos)
    skipped = SkipField(buffer, pos, end, tag_bytes)
    if skipped == -1:
      return pos
    pos = skipped
def _EndGroup(buffer, pos, end):
"""Skipping an END_GROUP tag returns -1 to tell the parent loop to break."""
return -1
def _SkipFixed32(buffer, pos, end):
"""Skip a fixed32 value. Returns the new position."""
pos += 4
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _RaiseInvalidWireType(buffer, pos, end):
  """Skip function for unknown wire types.  Raises an exception."""
  # Placed in the skipper table for the unused wire-type slots; any tag that
  # dispatches here is malformed input.
  raise _DecodeError('Tag had invalid wire type.')
def _FieldSkipper():
  """Constructs the SkipField function."""

  # Dispatch table indexed by wire type (0-7).  The last two entries map the
  # wire-type values 6 and 7 to the error handler.
  WIRETYPE_TO_SKIPPER = [
      _SkipVarint,
      _SkipFixed64,
      _SkipLengthDelimited,
      _SkipGroup,
      _EndGroup,
      _SkipFixed32,
      _RaiseInvalidWireType,
      _RaiseInvalidWireType,
      ]

  wiretype_mask = wire_format.TAG_TYPE_MASK
  local_ord = ord  # Bound to a local for faster lookup inside SkipField.

  def SkipField(buffer, pos, end, tag_bytes):
    """Skips a field with the specified tag.

    |pos| should point to the byte immediately after the tag.

    Returns:
        The new position (after the tag value), or -1 if the tag is an end-group
        tag (in which case the calling loop should break).
    """
    # The wire type is always in the first byte since varints are little-endian.
    wire_type = local_ord(tag_bytes[0]) & wiretype_mask
    return WIRETYPE_TO_SKIPPER[wire_type](buffer, pos, end)

  return SkipField
SkipField = _FieldSkipper()
| gpl-2.0 |
devaha/archagent | node_modules/grunt-plugin/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/android.py | 107 | 43862 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This generates makefiles suitable for inclusion into the Android build system
# via an Android.mk file. It is based on make.py, the standard makefile
# generator.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level GypAndroid.mk. This means that all
# variables in .mk-files clobber one another, and furthermore that any
# variables set potentially clash with other Android build system variables.
# Try to avoid setting global variables where possible.
import gyp
import gyp.common
import gyp.generator.make as make # Reuse global functions from make backend.
import os
import re
# Expansion values the Android backend substitutes for gyp's generator
# variables.  Most map onto Make variables defined in the generated
# Android.mk preamble (Write() above emits gyp_intermediate_dir etc.).
generator_default_variables = {
  'OS': 'android',
  'EXECUTABLE_PREFIX': '',
  'EXECUTABLE_SUFFIX': '',
  'STATIC_LIB_PREFIX': 'lib',
  'SHARED_LIB_PREFIX': 'lib',
  'STATIC_LIB_SUFFIX': '.a',
  'SHARED_LIB_SUFFIX': '.so',
  'INTERMEDIATE_DIR': '$(gyp_intermediate_dir)',
  'SHARED_INTERMEDIATE_DIR': '$(gyp_shared_intermediate_dir)',
  'PRODUCT_DIR': '$(gyp_shared_intermediate_dir)',
  'SHARED_LIB_DIR': '$(builddir)/lib.$(TOOLSET)',
  'LIB_DIR': '$(obj).$(TOOLSET)',
  'RULE_INPUT_ROOT': '%(INPUT_ROOT)s',  # This gets expanded by Python.
  'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s',  # This gets expanded by Python.
  'RULE_INPUT_PATH': '$(RULE_SOURCES)',
  'RULE_INPUT_EXT': '$(suffix $<)',
  'RULE_INPUT_NAME': '$(notdir $<)',
  'CONFIGURATION_NAME': 'NOT_USED_ON_ANDROID',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True

# Generator-specific gyp specs.
generator_additional_non_configuration_keys = [
    # Boolean to declare that this target does not want its name mangled.
    'android_unmangled_name',
]

# This backend adds no extra path sections or rule-generated sources.
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
# Makefile fragment appended once after all per-target .mk files.
SHARED_FOOTER = """\
# "gyp_all_modules" is a concatenation of the "gyp_all_modules" targets from
# all the included sub-makefiles. This is just here to clarify.
gyp_all_modules:
"""

# Banner written at the top of every generated .mk file.
header = """\
# This file is generated by gyp; do not edit.
"""
# Include directories the Android build system supplies on its own; these are
# filtered out of LOCAL_C_INCLUDES (see NormalizeIncludePaths) to avoid
# duplicating them in every module.
android_standard_include_paths = set([
    # JNI_H_INCLUDE in build/core/binary.mk
    'dalvik/libnativehelper/include/nativehelper',
    # from SRC_HEADERS in build/core/config.mk
    'system/core/include',
    'hardware/libhardware/include',
    'hardware/libhardware_legacy/include',
    'hardware/ril/include',
    'dalvik/libnativehelper/include',
    'frameworks/native/include',
    'frameworks/native/opengl/include',
    'frameworks/base/include',
    'frameworks/base/opengl/include',
    'frameworks/base/native/include',
    'external/skia/include',
    # TARGET_C_INCLUDES in build/core/combo/TARGET_linux-arm.mk
    'bionic/libc/arch-arm/include',
    'bionic/libc/include',
    'bionic/libstdc++/include',
    'bionic/libc/kernel/common',
    'bionic/libc/kernel/arch-arm',
    'bionic/libm/include',
    'bionic/libm/include/arm',
    'bionic/libthread_db/include',
    ])
# Map gyp target types to Android module classes.
# Types not listed here (e.g. 'none') fall back to the 'GYP' class
# (see the MODULE_CLASSES.get(..., 'GYP') call in Write()).
MODULE_CLASSES = {
    'static_library': 'STATIC_LIBRARIES',
    'shared_library': 'SHARED_LIBRARIES',
    'executable': 'EXECUTABLES',
}
def IsCPPExtension(ext):
  """Return True if gyp treats files with extension |ext| as C++ sources."""
  return make.COMPILABLE_EXTENSIONS.get(ext) == 'cxx'
def Sourceify(path):
  """Convert a path to its source directory form. The Android backend does not
  support options.generator_output, so this function is a noop."""
  return path
# Map from qualified target to path to output.
# For Android, the target of these maps is a tuple ('static', 'modulename'),
# ('dynamic', 'modulename'), or ('path', 'some/path') instead of a string,
# since we link by module.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class AndroidMkWriter(object):
"""AndroidMkWriter packages up the writing of one target-specific Android.mk.
Its only real entry point is Write(), and is mostly used for namespacing.
"""
def __init__(self, android_top_dir):
  # Root of the Android source tree.  Used to relativize absolute include
  # paths (NormalizeIncludePaths) and to drop ldflags that point into the
  # tree (NormalizeLdFlags).
  self.android_top_dir = android_top_dir
def Write(self, qualified_target, base_path, output_filename, spec, configs,
          part_of_all):
  """The main entry point: writes a .mk file for a single target.

  Arguments:
    qualified_target: target we're generating
    base_path: path relative to source root we're building in, used to resolve
               target-relative paths
    output_filename: output .mk file name to write
    spec, configs: gyp info
    part_of_all: flag indicating this target is part of 'all'
                 (forwarded to WriteTarget)
  """
  make.ensure_directory_exists(output_filename)

  self.fp = open(output_filename, 'w')

  self.fp.write(header)

  self.qualified_target = qualified_target
  self.path = base_path
  self.target = spec['target_name']
  self.type = spec['type']
  self.toolset = spec['toolset']

  deps, link_deps = self.ComputeDeps(spec)

  # Some of the generation below can add extra output, sources, or
  # link dependencies.  All of the out params of the functions that
  # follow use names like extra_foo.
  extra_outputs = []
  extra_sources = []

  self.android_class = MODULE_CLASSES.get(self.type, 'GYP')
  self.android_module = self.ComputeAndroidModule(spec)
  (self.android_stem, self.android_suffix) = self.ComputeOutputParts(spec)
  self.output = self.output_binary = self.ComputeOutput(spec)

  # Standard header.
  self.WriteLn('include $(CLEAR_VARS)\n')

  # Module class and name.
  self.WriteLn('LOCAL_MODULE_CLASS := ' + self.android_class)
  self.WriteLn('LOCAL_MODULE := ' + self.android_module)
  # Only emit LOCAL_MODULE_STEM if it's different to LOCAL_MODULE.
  # The library module classes fail if the stem is set. ComputeOutputParts
  # makes sure that stem == modulename in these cases.
  if self.android_stem != self.android_module:
    self.WriteLn('LOCAL_MODULE_STEM := ' + self.android_stem)
  self.WriteLn('LOCAL_MODULE_SUFFIX := ' + self.android_suffix)
  self.WriteLn('LOCAL_MODULE_TAGS := optional')
  if self.toolset == 'host':
    self.WriteLn('LOCAL_IS_HOST_MODULE := true')

  # Grab output directories; needed for Actions and Rules.
  self.WriteLn('gyp_intermediate_dir := $(call local-intermediates-dir)')
  self.WriteLn('gyp_shared_intermediate_dir := '
               '$(call intermediates-dir-for,GYP,shared)')
  self.WriteLn()

  # List files this target depends on so that actions/rules/copies/sources
  # can depend on the list.
  # TODO: doesn't pull in things through transitive link deps; needed?
  target_dependencies = [x[1] for x in deps if x[0] == 'path']
  self.WriteLn('# Make sure our deps are built first.')
  self.WriteList(target_dependencies, 'GYP_TARGET_DEPENDENCIES',
                 local_pathify=True)

  # Actions must come first, since they can generate more OBJs for use below.
  if 'actions' in spec:
    self.WriteActions(spec['actions'], extra_sources, extra_outputs)

  # Rules must be early like actions.
  if 'rules' in spec:
    self.WriteRules(spec['rules'], extra_sources, extra_outputs)

  if 'copies' in spec:
    self.WriteCopies(spec['copies'], extra_outputs)

  # GYP generated outputs.
  self.WriteList(extra_outputs, 'GYP_GENERATED_OUTPUTS', local_pathify=True)

  # Set LOCAL_ADDITIONAL_DEPENDENCIES so that Android's build rules depend
  # on both our dependency targets and our generated files.
  self.WriteLn('# Make sure our deps and generated files are built first.')
  self.WriteLn('LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) '
               '$(GYP_GENERATED_OUTPUTS)')
  self.WriteLn()

  # Sources.
  if spec.get('sources', []) or extra_sources:
    self.WriteSources(spec, configs, extra_sources)

  self.WriteTarget(spec, configs, deps, link_deps, part_of_all)

  # Update global list of target outputs, used in dependency tracking.
  target_outputs[qualified_target] = ('path', self.output_binary)

  # Update global list of link dependencies.
  if self.type == 'static_library':
    target_link_deps[qualified_target] = ('static', self.android_module)
  elif self.type == 'shared_library':
    target_link_deps[qualified_target] = ('shared', self.android_module)

  self.fp.close()
  return self.android_module
def WriteActions(self, actions, extra_sources, extra_outputs):
  """Write Makefile code for any 'actions' from the gyp input.

  extra_sources: a list that will be filled in with newly generated source
                 files, if any
  extra_outputs: a list that will be filled in with any outputs of these
                 actions (used to make other pieces dependent on these
                 actions)
  """
  for action in actions:
    name = make.StringToMakefileVariable('%s_%s' % (self.qualified_target,
                                                    action['action_name']))
    self.WriteLn('### Rules for action "%s":' % action['action_name'])
    inputs = action['inputs']
    outputs = action['outputs']

    # Build up a list of outputs.
    # Collect the output dirs we'll need.
    dirs = set()
    for out in outputs:
      if not out.startswith('$'):
        print ('WARNING: Action for target "%s" writes output to local path '
               '"%s".' % (self.target, out))
      dir = os.path.split(out)[0]
      if dir:
        dirs.add(dir)
    if int(action.get('process_outputs_as_sources', False)):
      extra_sources += outputs

    # Prepare the actual command.
    command = gyp.common.EncodePOSIXShellList(action['action'])
    if 'message' in action:
      quiet_cmd = 'Gyp action: %s ($@)' % action['message']
    else:
      quiet_cmd = 'Gyp action: %s ($@)' % name
    if len(dirs) > 0:
      # NOTE(review): dirs is a set, so the mkdir argument order is not
      # deterministic across runs -- harmless for mkdir -p, but it makes the
      # generated file non-reproducible byte-for-byte; confirm if that matters.
      command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command

    cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
    command = cd_action + command

    # The makefile rules are all relative to the top dir, but the gyp actions
    # are defined relative to their containing dir.  This replaces the gyp_*
    # variables for the action rule with an absolute version so that the
    # output goes in the right place.
    # Only write the gyp_* rules for the "primary" output (:1);
    # it's superfluous for the "extra outputs", and this avoids accidentally
    # writing duplicate dummy rules for those outputs.
    main_output = make.QuoteSpaces(self.LocalPathify(outputs[0]))
    self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
    self.WriteLn('%s: gyp_intermediate_dir := '
                 '$(GYP_ABS_ANDROID_TOP_DIR)/$(gyp_intermediate_dir)' %
                 main_output)
    self.WriteLn('%s: gyp_shared_intermediate_dir := '
                 '$(GYP_ABS_ANDROID_TOP_DIR)/$(gyp_shared_intermediate_dir)' %
                 main_output)

    for input in inputs:
      assert ' ' not in input, (
          "Spaces in action input filenames not supported (%s)" % input)
    for output in outputs:
      assert ' ' not in output, (
          "Spaces in action output filenames not supported (%s)" % output)

    self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
                 (main_output, ' '.join(map(self.LocalPathify, inputs))))
    self.WriteLn('\t@echo "%s"' % quiet_cmd)
    self.WriteLn('\t$(hide)%s\n' % command)
    for output in outputs[1:]:
      # Make each output depend on the main output, with an empty command
      # to force make to notice that the mtime has changed.
      self.WriteLn('%s: %s ;' % (self.LocalPathify(output), main_output))

    extra_outputs += outputs
    self.WriteLn()

  self.WriteLn()
def WriteRules(self, rules, extra_sources, extra_outputs):
  """Write Makefile code for any 'rules' from the gyp input.

  extra_sources: a list that will be filled in with newly generated source
                 files, if any
  extra_outputs: a list that will be filled in with any outputs of these
                 rules (used to make other pieces dependent on these rules)
  """
  if len(rules) == 0:
    return
  # Phony target that depends on every rule's primary output; appended to
  # extra_sources below so all rules run as part of the build.
  rule_trigger = '%s_rule_trigger' % self.android_module

  did_write_rule = False
  for rule in rules:
    if len(rule.get('rule_sources', [])) == 0:
      continue
    did_write_rule = True
    name = make.StringToMakefileVariable('%s_%s' % (self.qualified_target,
                                                    rule['rule_name']))
    self.WriteLn('\n### Generated for rule "%s":' % name)
    self.WriteLn('# "%s":' % rule)
    inputs = rule.get('inputs')
    for rule_source in rule.get('rule_sources', []):
      (rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
      (rule_source_root, rule_source_ext) = \
          os.path.splitext(rule_source_basename)

      outputs = [self.ExpandInputRoot(out, rule_source_root,
                                      rule_source_dirname)
                 for out in rule['outputs']]

      dirs = set()
      for out in outputs:
        if not out.startswith('$'):
          print ('WARNING: Rule for target %s writes output to local path %s'
                 % (self.target, out))
        dir = os.path.dirname(out)
        if dir:
          dirs.add(dir)
      extra_outputs += outputs
      if int(rule.get('process_outputs_as_sources', False)):
        extra_sources.extend(outputs)

      # Expand INPUT_ROOT/INPUT_DIRNAME and substitute the concrete source
      # file for $(RULE_SOURCES) in each command component.
      components = []
      for component in rule['action']:
        component = self.ExpandInputRoot(component, rule_source_root,
                                         rule_source_dirname)
        if '$(RULE_SOURCES)' in component:
          component = component.replace('$(RULE_SOURCES)',
                                        rule_source)
        components.append(component)

      command = gyp.common.EncodePOSIXShellList(components)
      cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
      command = cd_action + command
      if dirs:
        command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command

      # We set up a rule to build the first output, and then set up
      # a rule for each additional output to depend on the first.
      outputs = map(self.LocalPathify, outputs)
      main_output = outputs[0]
      self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
      self.WriteLn('%s: gyp_intermediate_dir := '
                   '$(GYP_ABS_ANDROID_TOP_DIR)/$(gyp_intermediate_dir)'
                   % main_output)
      self.WriteLn('%s: gyp_shared_intermediate_dir := '
                   '$(GYP_ABS_ANDROID_TOP_DIR)/$(gyp_shared_intermediate_dir)'
                   % main_output)

      main_output_deps = self.LocalPathify(rule_source)
      if inputs:
        main_output_deps += ' '
        main_output_deps += ' '.join([self.LocalPathify(f) for f in inputs])

      self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
                   (main_output, main_output_deps))
      self.WriteLn('\t%s\n' % command)
      for output in outputs[1:]:
        self.WriteLn('%s: %s' % (output, main_output))
      self.WriteLn('.PHONY: %s' % (rule_trigger))
      self.WriteLn('%s: %s' % (rule_trigger, main_output))
      self.WriteLn('')
  if did_write_rule:
    extra_sources.append(rule_trigger)  # Force all rules to run.
    self.WriteLn('### Finished generating for all rules')
    self.WriteLn('')
def WriteCopies(self, copies, extra_outputs):
  """Write Makefile code for any 'copies' from the gyp input.

  extra_outputs: a list that will be filled in with any outputs of this action
                 (used to make other pieces dependent on this action)
  """
  self.WriteLn('### Generated for copy rule.')

  variable = make.StringToMakefileVariable(self.qualified_target + '_copies')
  outputs = []
  for copy in copies:
    for path in copy['files']:
      # The Android build system does not allow generation of files into the
      # source tree. The destination should start with a variable, which will
      # typically be $(gyp_intermediate_dir) or
      # $(gyp_shared_intermediate_dir). Note that we can't use an assertion
      # because some of the gyp tests depend on this.
      if not copy['destination'].startswith('$'):
        print ('WARNING: Copy rule for target %s writes output to '
               'local path %s' % (self.target, copy['destination']))

      # LocalPathify() calls normpath, stripping trailing slashes.
      path = Sourceify(self.LocalPathify(path))
      filename = os.path.split(path)[1]
      output = Sourceify(self.LocalPathify(os.path.join(copy['destination'],
                                                        filename)))

      # One copy rule per file, using the Android $(ACP) copy tool.
      self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES) | $(ACP)' %
                   (output, path))
      self.WriteLn('\t@echo Copying: $@')
      self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
      self.WriteLn('\t$(hide) $(ACP) -r $< $@')
      self.WriteLn()
      outputs.append(output)
  self.WriteLn('%s = %s' % (variable,
                            ' '.join(map(make.QuoteSpaces, outputs))))
  extra_outputs.append('$(%s)' % variable)
  self.WriteLn()
def WriteSourceFlags(self, spec, configs):
  """Write out the flags and include paths used to compile source files for
  the current target.

  Args:
    spec, configs: input from gyp.
  """
  # Only the default configuration's flags are consulted here.
  config = configs[spec['default_configuration']]
  extracted_includes = []

  self.WriteLn('\n# Flags passed to both C and C++ files.')
  cflags, includes_from_cflags = self.ExtractIncludesFromCFlags(
      config.get('cflags'))
  extracted_includes.extend(includes_from_cflags)
  self.WriteList(cflags, 'MY_CFLAGS')

  cflags_c, includes_from_cflags_c = self.ExtractIncludesFromCFlags(
      config.get('cflags_c'))
  extracted_includes.extend(includes_from_cflags_c)
  self.WriteList(cflags_c, 'MY_CFLAGS_C')

  self.WriteList(config.get('defines'), 'MY_DEFS', prefix='-D',
                 quoter=make.EscapeCppDefine)
  self.WriteLn('LOCAL_CFLAGS := $(MY_CFLAGS_C) $(MY_CFLAGS) $(MY_DEFS)')

  # Undefine ANDROID for host modules
  # TODO: the source code should not use macro ANDROID to tell if it's host or
  # target module.
  if self.toolset == 'host':
    self.WriteLn('# Undefine ANDROID for host modules')
    self.WriteLn('LOCAL_CFLAGS += -UANDROID')

  self.WriteLn('\n# Include paths placed before CFLAGS/CPPFLAGS')
  includes = list(config.get('include_dirs', []))
  includes.extend(extracted_includes)
  includes = map(Sourceify, map(self.LocalPathify, includes))
  includes = self.NormalizeIncludePaths(includes)
  self.WriteList(includes, 'LOCAL_C_INCLUDES')
  self.WriteLn('LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) '
               '$(LOCAL_C_INCLUDES)')

  self.WriteLn('\n# Flags passed to only C++ (and not C) files.')
  self.WriteList(config.get('cflags_cc'), 'LOCAL_CPPFLAGS')
def WriteSources(self, spec, configs, extra_sources):
  """Write Makefile code for any 'sources' from the gyp input.
  These are source files necessary to build the current target.
  We need to handle shared_intermediate directory source files as
  a special case by copying them to the intermediate directory and
  treating them as generated sources. Otherwise the Android build
  rules won't pick them up.

  Args:
    spec, configs: input from gyp.
    extra_sources: Sources generated from Actions or Rules.
  """
  sources = filter(make.Compilable, spec.get('sources', []))
  generated_not_sources = [x for x in extra_sources if not make.Compilable(x)]
  extra_sources = filter(make.Compilable, extra_sources)

  # Determine and output the C++ extension used by these sources.
  # We simply find the first C++ file and use that extension.
  all_sources = sources + extra_sources
  local_cpp_extension = '.cpp'
  for source in all_sources:
    (root, ext) = os.path.splitext(source)
    if IsCPPExtension(ext):
      local_cpp_extension = ext
      break
  if local_cpp_extension != '.cpp':
    self.WriteLn('LOCAL_CPP_EXTENSION := %s' % local_cpp_extension)

  # We need to move any non-generated sources that are coming from the
  # shared intermediate directory out of LOCAL_SRC_FILES and put them
  # into LOCAL_GENERATED_SOURCES. We also need to move over any C++ files
  # that don't match our local_cpp_extension, since Android will only
  # generate Makefile rules for a single LOCAL_CPP_EXTENSION.
  local_files = []
  for source in sources:
    (root, ext) = os.path.splitext(source)
    if '$(gyp_shared_intermediate_dir)' in source:
      extra_sources.append(source)
    elif '$(gyp_intermediate_dir)' in source:
      extra_sources.append(source)
    elif IsCPPExtension(ext) and ext != local_cpp_extension:
      extra_sources.append(source)
    else:
      local_files.append(os.path.normpath(os.path.join(self.path, source)))

  # For any generated source, if it is coming from the shared intermediate
  # directory then we add a Make rule to copy them to the local intermediate
  # directory first. This is because the Android LOCAL_GENERATED_SOURCES
  # must be in the local module intermediate directory for the compile rules
  # to work properly. If the file has the wrong C++ extension, then we add
  # a rule to copy that to intermediates and use the new version.
  final_generated_sources = []
  # If a source file gets copied, we still need to add the original source
  # directory as header search path, for GCC searches headers in the
  # directory that contains the source file by default.
  origin_src_dirs = []
  for source in extra_sources:
    local_file = source
    if not '$(gyp_intermediate_dir)/' in local_file:
      basename = os.path.basename(local_file)
      local_file = '$(gyp_intermediate_dir)/' + basename
    (root, ext) = os.path.splitext(local_file)
    if IsCPPExtension(ext) and ext != local_cpp_extension:
      local_file = root + local_cpp_extension
    if local_file != source:
      self.WriteLn('%s: %s' % (local_file, self.LocalPathify(source)))
      self.WriteLn('\tmkdir -p $(@D); cp $< $@')
      origin_src_dirs.append(os.path.dirname(source))
    final_generated_sources.append(local_file)

  # We add back in all of the non-compilable stuff to make sure that the
  # make rules have dependencies on them.
  final_generated_sources.extend(generated_not_sources)
  self.WriteList(final_generated_sources, 'LOCAL_GENERATED_SOURCES')

  origin_src_dirs = gyp.common.uniquer(origin_src_dirs)
  origin_src_dirs = map(Sourceify, map(self.LocalPathify, origin_src_dirs))
  self.WriteList(origin_src_dirs, 'GYP_COPIED_SOURCE_ORIGIN_DIRS')

  self.WriteList(local_files, 'LOCAL_SRC_FILES')

  # Write out the flags used to compile the source; this must be done last
  # so that GYP_COPIED_SOURCE_ORIGIN_DIRS can be used as an include path.
  self.WriteSourceFlags(spec, configs)
def ComputeAndroidModule(self, spec):
"""Return the Android module name used for a gyp spec.
We use the complete qualified target name to avoid collisions between
duplicate targets in different directories. We also add a suffix to
distinguish gyp-generated module names.
"""
if int(spec.get('android_unmangled_name', 0)):
assert self.type != 'shared_library' or self.target.startswith('lib')
return self.target
if self.type == 'shared_library':
# For reasons of convention, the Android build system requires that all
# shared library modules are named 'libfoo' when generating -l flags.
prefix = 'lib_'
else:
prefix = ''
if spec['toolset'] == 'host':
suffix = '_host_gyp'
else:
suffix = '_gyp'
if self.path:
name = '%s%s_%s%s' % (prefix, self.path, self.target, suffix)
else:
name = '%s%s%s' % (prefix, self.target, suffix)
return make.StringToMakefileVariable(name)
def ComputeOutputParts(self, spec):
"""Return the 'output basename' of a gyp spec, split into filename + ext.
Android libraries must be named the same thing as their module name,
otherwise the linker can't find them, so product_name and so on must be
ignored if we are building a library, and the "lib" prepending is
not done for Android.
"""
assert self.type != 'loadable_module' # TODO: not supported?
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
target = self.ComputeAndroidModule(spec)
target_ext = '.a'
elif self.type == 'shared_library':
target = self.ComputeAndroidModule(spec)
target_ext = '.so'
elif self.type == 'none':
target_ext = '.stamp'
elif self.type != 'executable':
print ("ERROR: What output file should be generated?",
"type", self.type, "target", target)
if self.type != 'static_library' and self.type != 'shared_library':
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
target_stem = target_prefix + target
return (target_stem, target_ext)
def ComputeOutputBasename(self, spec):
  """Return the 'output basename' of a gyp spec.

  E.g., the loadable module 'foobar' in directory 'baz' will produce
  'libfoobar.so'
  """
  stem, ext = self.ComputeOutputParts(spec)
  return stem + ext
def ComputeOutput(self, spec):
  """Return the 'output' (full output path) of a gyp spec.

  E.g., the loadable module 'foobar' in directory 'baz' will produce
  '$(obj)/baz/libfoobar.so'
  """
  if self.type == 'executable' and self.toolset == 'host':
    # We install host executables into shared_intermediate_dir so they can be
    # run by gyp rules that refer to PRODUCT_DIR.
    path = '$(gyp_shared_intermediate_dir)'
  elif self.type == 'shared_library':
    if self.toolset == 'host':
      path = '$(HOST_OUT_INTERMEDIATE_LIBRARIES)'
    else:
      path = '$(TARGET_OUT_INTERMEDIATE_LIBRARIES)'
  else:
    # Other targets just get built into their intermediate dir.
    if self.toolset == 'host':
      # The trailing 'true' argument selects the host intermediates dir.
      path = '$(call intermediates-dir-for,%s,%s,true)' % (self.android_class,
                                                           self.android_module)
    else:
      path = '$(call intermediates-dir-for,%s,%s)' % (self.android_class,
                                                      self.android_module)

  assert spec.get('product_dir') is None # TODO: not supported?
  return os.path.join(path, self.ComputeOutputBasename(spec))
def NormalizeLdFlags(self, ld_flags):
  """ Clean up ldflags from gyp file.
  Remove any ldflags that contain android_top_dir.

  Args:
    ld_flags: ldflags from gyp files.

  Returns:
    clean ldflags
  """
  # Any flag mentioning the Android top directory would leak an absolute
  # host path into the generated makefile, so drop it.
  return [flag for flag in ld_flags if self.android_top_dir not in flag]
def NormalizeIncludePaths(self, include_paths):
  """ Normalize include_paths.
  Convert absolute paths to relative to the Android top directory;
  filter out include paths that are already brought in by the Android build
  system.

  Args:
    include_paths: A list of unprocessed include paths.
  Returns:
    A list of normalized include paths.
  """
  normalized = []
  for path in include_paths:
    # NOTE(review): path[0] raises IndexError on an empty string; presumably
    # gyp never produces empty include paths -- confirm upstream.
    if path[0] == '/':
      path = gyp.common.RelativePath(path, self.android_top_dir)
    # Filter out the Android standard search path.
    if path not in android_standard_include_paths:
      normalized.append(path)
  return normalized
def ExtractIncludesFromCFlags(self, cflags):
  """Extract includes "-I..." out from cflags

  Args:
    cflags: A list of compiler flags, which may be mixed with "-I.."

  Returns:
    A tuple of lists: (clean_cflags, include_paths). "-I.." is trimmed.
  """
  clean_cflags = []
  include_paths = []
  # Treat None like an empty flag list.
  for flag in cflags or []:
    if flag.startswith('-I'):
      # Keep only the path part of the -I flag.
      include_paths.append(flag[2:])
    else:
      clean_cflags.append(flag)
  return (clean_cflags, include_paths)
def ComputeAndroidLibraryModuleNames(self, libraries):
  """Compute the Android module names from libraries, ie spec.get('libraries')

  Args:
    libraries: the value of spec.get('libraries')

  Returns:
    A tuple (static_lib_modules, dynamic_lib_modules)
  """
  static_lib_modules = []
  dynamic_lib_modules = []
  # Each entry can contain multiple space-separated words; flatten first.
  all_libs = [lib for libs in libraries for lib in libs.split()]
  for lib in all_libs:
    # Skip the system libraries that the Android build system links by
    # default.
    if lib in ('-lc', '-lstdc++', '-lm') or lib.endswith('libgcc.a'):
      continue
    static_match = re.search(r'([^/]+)\.a$', lib)
    if static_match:
      static_lib_modules.append(static_match.group(1))
      continue
    shared_match = re.search(r'([^/]+)\.so$', lib)
    if shared_match:
      dynamic_lib_modules.append(shared_match.group(1))
      continue
    # "-lstlport" -> libstlport
    if lib.startswith('-l'):
      if lib.endswith('_static'):
        static_lib_modules.append('lib' + lib[2:])
      else:
        dynamic_lib_modules.append('lib' + lib[2:])
  return (static_lib_modules, dynamic_lib_modules)
def ComputeDeps(self, spec):
  """Compute the dependencies of a gyp spec.

  Returns a tuple (deps, link_deps), where each is a list of
  filenames that will need to be put in front of make for either
  building (deps) or linking (link_deps).
  """
  deps = []
  link_deps = []
  if 'dependencies' in spec:
    # target_outputs / target_link_deps are looked up by qualified target
    # name; presumably module-level maps filled as earlier targets are
    # written -- their definitions are not visible in this chunk.
    deps.extend([target_outputs[dep] for dep in spec['dependencies']
                 if target_outputs[dep]])
    for dep in spec['dependencies']:
      if dep in target_link_deps:
        link_deps.append(target_link_deps[dep])
    # Everything we link against is also a build-order dependency.
    deps.extend(link_deps)
  return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
def WriteTargetFlags(self, spec, configs, link_deps):
  """Write Makefile code to specify the link flags and library dependencies.

  spec, configs: input from gyp.
  link_deps: link dependency list; see ComputeDeps()
  """
  config = configs[spec['default_configuration']]

  # LDFLAGS
  ldflags = list(config.get('ldflags', []))
  # NOTE(review): this pass pulls library-looking entries ("-lfoo", *.a,
  # *.so) out of ldflags so they can be folded into the LOCAL_*_LIBRARIES
  # lists below; NormalizeLdFlags then strips android_top_dir paths from
  # what is emitted as LOCAL_LDFLAGS.
  static_flags, dynamic_flags = self.ComputeAndroidLibraryModuleNames(
      ldflags)
  self.WriteLn('')
  self.WriteList(self.NormalizeLdFlags(ldflags), 'LOCAL_LDFLAGS')

  # Libraries (i.e. -lfoo)
  libraries = gyp.common.uniquer(spec.get('libraries', []))
  static_libs, dynamic_libs = self.ComputeAndroidLibraryModuleNames(
      libraries)

  # Link dependencies (i.e. libfoo.a, libfoo.so)
  static_link_deps = [x[1] for x in link_deps if x[0] == 'static']
  shared_link_deps = [x[1] for x in link_deps if x[0] == 'shared']
  self.WriteLn('')
  self.WriteList(static_flags + static_libs + static_link_deps,
                 'LOCAL_STATIC_LIBRARIES')
  self.WriteLn('# Enable grouping to fix circular references')
  self.WriteLn('LOCAL_GROUP_STATIC_LIBRARIES := true')
  self.WriteLn('')
  self.WriteList(dynamic_flags + dynamic_libs + shared_link_deps,
                 'LOCAL_SHARED_LIBRARIES')
def WriteTarget(self, spec, configs, deps, link_deps, part_of_all):
  """Write Makefile code to produce the final target of the gyp spec.

  spec, configs: input from gyp.
  deps, link_deps: dependency lists; see ComputeDeps()
  part_of_all: flag indicating this target is part of 'all'
  """
  self.WriteLn('### Rules for final target.')

  if self.type != 'none':
    self.WriteTargetFlags(spec, configs, link_deps)

  # Add to the set of targets which represent the gyp 'all' target. We use the
  # name 'gyp_all_modules' as the Android build system doesn't allow the use
  # of the Make target 'all' and because 'all_modules' is the equivalent of
  # the Make target 'all' on Android.
  if part_of_all:
    self.WriteLn('# Add target alias to "gyp_all_modules" target.')
    self.WriteLn('.PHONY: gyp_all_modules')
    self.WriteLn('gyp_all_modules: %s' % self.android_module)
    self.WriteLn('')

  # Add an alias from the gyp target name to the Android module name. This
  # simplifies manual builds of the target, and is required by the test
  # framework.
  if self.target != self.android_module:
    self.WriteLn('# Alias gyp target name.')
    self.WriteLn('.PHONY: %s' % self.target)
    self.WriteLn('%s: %s' % (self.target, self.android_module))
    self.WriteLn('')

  # Add the command to trigger build of the target type depending
  # on the toolset. Ex: BUILD_STATIC_LIBRARY vs. BUILD_HOST_STATIC_LIBRARY
  # NOTE: This has to come last!
  modifier = ''
  if self.toolset == 'host':
    modifier = 'HOST_'
  if self.type == 'static_library':
    self.WriteLn('include $(BUILD_%sSTATIC_LIBRARY)' % modifier)
  elif self.type == 'shared_library':
    self.WriteLn('LOCAL_PRELINK_MODULE := false')
    self.WriteLn('include $(BUILD_%sSHARED_LIBRARY)' % modifier)
  elif self.type == 'executable':
    if self.toolset == 'host':
      self.WriteLn('LOCAL_MODULE_PATH := $(gyp_shared_intermediate_dir)')
    else:
      # Don't install target executables for now, as it results in them being
      # included in ROM. This can be revisited if there's a reason to install
      # them later.
      self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
    self.WriteLn('include $(BUILD_%sEXECUTABLE)' % modifier)
  else:
    # Everything else ('none' targets in particular) becomes a timestamp
    # file built through base_rules.mk so other modules can depend on it.
    self.WriteLn('LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp')
    self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
    self.WriteLn()
    self.WriteLn('include $(BUILD_SYSTEM)/base_rules.mk')
    self.WriteLn()
    self.WriteLn('$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)')
    self.WriteLn('\t$(hide) echo "Gyp timestamp: $@"')
    self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
    self.WriteLn('\t$(hide) touch $@')
def WriteList(self, value_list, variable=None, prefix='',
              quoter=make.QuoteIfNecessary, local_pathify=False):
  """Write a variable definition that is a list of values.

  E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
       foo = blaha blahb
  but in a pretty-printed style.

  Args:
    value_list: the values; an empty/None list still writes an (empty)
        assignment so the variable is always defined.
    variable: the makefile variable name to assign to.
    prefix: string prepended to each element before quoting.
    quoter: callable used to escape each element for make.
    local_pathify: if True, run each element through LocalPathify().
  """
  values = ''
  if value_list:
    value_list = [quoter(prefix + l) for l in value_list]
    if local_pathify:
      value_list = [self.LocalPathify(l) for l in value_list]
    # One element per line, continued with backslashes.
    values = ' \\\n\t' + ' \\\n\t'.join(value_list)
  self.fp.write('%s :=%s\n\n' % (variable, values))
def WriteLn(self, text=''):
  """Write *text* followed by a newline to the output makefile."""
  self.fp.write(text + '\n')
def LocalPathify(self, path):
  """Convert a subdirectory-relative path into a normalized path which starts
  with the make variable $(LOCAL_PATH) (i.e. the top of the project tree).
  Absolute paths, or paths that contain variables, are just normalized."""
  if '$(' in path or os.path.isabs(path):
    # path is not a file in the project tree in this case, but calling
    # normpath is still important for trimming trailing slashes.
    return os.path.normpath(path)
  result = os.path.normpath(os.path.join('$(LOCAL_PATH)', self.path, path))
  # Check that normalizing the path didn't ../ itself out of $(LOCAL_PATH)
  # - i.e. that the resulting path is still inside the project tree. The
  # path may legitimately have ended up containing just $(LOCAL_PATH), though,
  # so we don't look for a slash.
  assert result.startswith('$(LOCAL_PATH)'), (
      'Path %s attempts to escape from gyp path %s !)' % (path, self.path))
  return result
def ExpandInputRoot(self, template, expansion, dirname):
  """Expand %(INPUT_ROOT)s / %(INPUT_DIRNAME)s placeholders in *template*.

  Templates without either placeholder are returned untouched.
  """
  if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
    return template
  return template % {'INPUT_ROOT': expansion, 'INPUT_DIRNAME': dirname}
def WriteAutoRegenerationRule(params, root_makefile, makefile_name,
                              build_files):
  """Write the target to regenerate the Makefile.

  Emits a make rule that re-runs gyp with its original command line whenever
  any of the input .gyp/.gypi files is newer than the generated makefile.
  """
  options = params['options']
  # Sort to avoid non-functional changes to makefile.
  build_files = sorted([os.path.join('$(LOCAL_PATH)', f) for f in build_files])
  build_files_args = [gyp.common.RelativePath(filename, options.toplevel_dir)
                      for filename in params['build_files_arg']]
  build_files_args = [os.path.join('$(PRIVATE_LOCAL_PATH)', f)
                      for f in build_files_args]
  gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'],
                                            options.toplevel_dir)
  makefile_path = os.path.join('$(LOCAL_PATH)', makefile_name)
  if not gyp_binary.startswith(os.sep):
    # Make sure gyp can be run from the current directory even when it is
    # not on PATH.
    gyp_binary = os.path.join('.', gyp_binary)
  root_makefile.write('GYP_FILES := \\\n %s\n\n' %
                      '\\\n '.join(map(Sourceify, build_files)))
  root_makefile.write('%s: PRIVATE_LOCAL_PATH := $(LOCAL_PATH)\n' %
                      makefile_path)
  root_makefile.write('%s: $(GYP_FILES)\n' % makefile_path)
  root_makefile.write('\techo ACTION Regenerating $@\n\t%s\n\n' %
                      gyp.common.EncodePOSIXShellList([gyp_binary, '-fandroid'] +
                                                      gyp.RegenerateFlags(options) +
                                                      build_files_args))
def GenerateOutput(target_list, target_dicts, data, params):
  """Generator entry point: write GypAndroid.mk plus one .mk per target.

  target_list, target_dicts, data, params: the standard gyp generator
  inputs (list of qualified targets, their specs, per-file data, options).
  """
  options = params['options']
  generator_flags = params.get('generator_flags', {})
  builddir_name = generator_flags.get('output_dir', 'out')
  limit_to_target_all = generator_flags.get('limit_to_target_all', False)
  android_top_dir = os.environ.get('ANDROID_BUILD_TOP')
  assert android_top_dir, '$ANDROID_BUILD_TOP not set; you need to run lunch.'

  def CalculateMakefilePath(build_file, base_name):
    """Determine where to write a Makefile for a given gyp file."""
    # Paths in gyp files are relative to the .gyp file, but we want
    # paths relative to the source root for the master makefile. Grab
    # the path of the .gyp file as the base to relativize against.
    # E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.depth)
    # We write the file in the base_path directory.
    output_file = os.path.join(options.depth, base_path, base_name)
    assert not options.generator_output, (
        'The Android backend does not support options.generator_output.')
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.toplevel_dir)
    return base_path, output_file

  # TODO: search for the first non-'Default' target. This can go
  # away when we add verification that all targets have the
  # necessary configurations.
  default_configuration = None
  toolsets = set([target_dicts[target]['toolset'] for target in target_list])
  for target in target_list:
    spec = target_dicts[target]
    if spec['default_configuration'] != 'Default':
      default_configuration = spec['default_configuration']
      break
  if not default_configuration:
    default_configuration = 'Default'

  srcdir = '.'
  makefile_name = 'GypAndroid.mk' + options.suffix
  makefile_path = os.path.join(options.toplevel_dir, makefile_name)
  assert not options.generator_output, (
      'The Android backend does not support options.generator_output.')
  make.ensure_directory_exists(makefile_path)
  # NOTE(review): this handle is never closed on the duplicate-module early
  # return below; consider a try/finally if that error path matters.
  root_makefile = open(makefile_path, 'w')
  root_makefile.write(header)

  # We set LOCAL_PATH just once, here, to the top of the project tree. This
  # allows all the other paths we use to be relative to the Android.mk file,
  # as the Android build system expects.
  root_makefile.write('\nLOCAL_PATH := $(call my-dir)\n')

  # Find the list of targets that derive from the gyp file(s) being built.
  needed_targets = set()
  for build_file in params['build_files']:
    for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
      needed_targets.add(target)

  build_files = set()
  include_list = set()
  android_modules = {}
  for qualified_target in target_list:
    build_file, target, toolset = gyp.common.ParseQualifiedTarget(
        qualified_target)
    build_files.add(gyp.common.RelativePath(build_file, options.toplevel_dir))
    included_files = data[build_file]['included_files']
    for included_file in included_files:
      # The included_files entries are relative to the dir of the build file
      # that included them, so we have to undo that and then make them
      # relative to the root dir.
      relative_include_file = gyp.common.RelativePath(
          gyp.common.UnrelativePath(included_file, build_file),
          options.toplevel_dir)
      abs_include_file = os.path.abspath(relative_include_file)
      # If the include file is from the ~/.gyp dir, we should use absolute path
      # so that relocating the src dir doesn't break the path.
      if (params['home_dot_gyp'] and
          abs_include_file.startswith(params['home_dot_gyp'])):
        build_files.add(abs_include_file)
      else:
        build_files.add(relative_include_file)

    base_path, output_file = CalculateMakefilePath(build_file,
        target + '.' + toolset + options.suffix + '.mk')

    spec = target_dicts[qualified_target]
    configs = spec['configurations']

    part_of_all = (qualified_target in needed_targets and
                   not int(spec.get('suppress_wildcard', False)))
    if limit_to_target_all and not part_of_all:
      continue

    writer = AndroidMkWriter(android_top_dir)
    android_module = writer.Write(qualified_target, base_path, output_file,
                                  spec, configs, part_of_all=part_of_all)
    if android_module in android_modules:
      # Duplicate module names would silently overwrite each other's rules.
      print ('ERROR: Android module names must be unique. The following '
             'targets both generate Android module name %s.\n %s\n %s' %
             (android_module, android_modules[android_module],
              qualified_target))
      return
    android_modules[android_module] = qualified_target

    # Our root_makefile lives at the source root. Compute the relative path
    # from there to the output_file for including.
    mkfile_rel_path = gyp.common.RelativePath(output_file,
                                              os.path.dirname(makefile_path))
    include_list.add(mkfile_rel_path)

  # Some tools need to know the absolute path of the top directory.
  root_makefile.write('GYP_ABS_ANDROID_TOP_DIR := $(shell pwd)\n')

  # Write out the sorted list of includes.
  root_makefile.write('\n')
  for include_file in sorted(include_list):
    root_makefile.write('include $(LOCAL_PATH)/' + include_file + '\n')
  root_makefile.write('\n')

  if generator_flags.get('auto_regeneration', True):
    WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files)

  root_makefile.write(SHARED_FOOTER)

  root_makefile.close()
| gpl-2.0 |
glewis17/fuel | tests/test_streams.py | 21 | 2142 | import numpy
from numpy.testing import assert_equal, assert_raises
from fuel.datasets import IterableDataset, IndexableDataset
from fuel.schemes import SequentialExampleScheme, SequentialScheme
from fuel.streams import AbstractDataStream, DataStream
class DummyDataStream(AbstractDataStream):
    """Minimal concrete AbstractDataStream used by the tests below.

    Every abstract method is stubbed with a no-op so instances can be
    created without a dataset or iteration scheme.
    """
    def reset(self):
        pass

    def close(self):
        pass

    def next_epoch(self):
        pass

    def get_epoch_iterator(self, as_dict=False):
        pass

    def get_data(self, request=None):
        pass
class TestAbstractDataStream(object):
    """Tests for AbstractDataStream's produces_examples/scheme interplay."""
    def test_raises_value_error_on_no_scheme_no_produces_examples(self):
        # Without a scheme the stream cannot tell whether it yields examples
        # or batches, so reading produces_examples must fail.
        stream = DummyDataStream()
        assert_raises(ValueError, getattr, stream, 'produces_examples')

    def test_raises_value_error_when_setting_produces_examples_if_scheme(self):
        # With a scheme attached, produces_examples is derived from the
        # scheme and must not be settable by hand.
        stream = DummyDataStream(SequentialExampleScheme(2))
        assert_raises(ValueError, setattr, stream, 'produces_examples', True)
class TestDataStream(object):
    """Tests for DataStream's sources and axis_labels handling."""
    def setUp(self):
        # Two examples, one 'data' source.
        self.dataset = IterableDataset(numpy.eye(2))

    def test_sources_setter(self):
        stream = DataStream(self.dataset)
        stream.sources = ('features',)
        assert_equal(stream.sources, ('features',))

    def test_no_axis_labels(self):
        # No labels on the dataset means none on the stream either.
        stream = DataStream(self.dataset)
        assert stream.axis_labels is None

    def test_axis_labels_on_produces_examples(self):
        # Example-wise iteration drops the leading 'batch' axis label.
        axis_labels = {'data': ('batch', 'features')}
        self.dataset.axis_labels = axis_labels
        stream = DataStream(self.dataset)
        assert_equal(stream.axis_labels, {'data': ('features',)})

    def test_axis_labels_on_produces_batches(self):
        # Batch-wise iteration keeps the labels unchanged.
        dataset = IndexableDataset(numpy.eye(2))
        axis_labels = {'data': ('batch', 'features')}
        dataset.axis_labels = axis_labels
        stream = DataStream(dataset, iteration_scheme=SequentialScheme(2, 2))
        assert_equal(stream.axis_labels, axis_labels)

    def test_produces_examples(self):
        stream = DataStream(self.dataset,
                            iteration_scheme=SequentialExampleScheme(2))
        assert stream.produces_examples
| mit |
sigma-random/asuswrt-merlin | release/src/router/samba36/lib/dnspython/dns/rdtypes/ANY/SOA.py | 246 | 5180 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import dns.exception
import dns.rdata
import dns.name
class SOA(dns.rdata.Rdata):
    """SOA record

    @ivar mname: the SOA MNAME (master name) field
    @type mname: dns.name.Name object
    @ivar rname: the SOA RNAME (responsible name) field
    @type rname: dns.name.Name object
    @ivar serial: The zone's serial number
    @type serial: int
    @ivar refresh: The zone's refresh value (in seconds)
    @type refresh: int
    @ivar retry: The zone's retry value (in seconds)
    @type retry: int
    @ivar expire: The zone's expiration value (in seconds)
    @type expire: int
    @ivar minimum: The zone's negative caching time (in seconds, called
    "minimum" for historical reasons)
    @type minimum: int
    @see: RFC 1035"""

    # Slots avoid a per-instance __dict__; SOA adds seven fields on top of
    # the base Rdata slots.
    __slots__ = ['mname', 'rname', 'serial', 'refresh', 'retry', 'expire',
                 'minimum']

    def __init__(self, rdclass, rdtype, mname, rname, serial, refresh, retry,
                 expire, minimum):
        super(SOA, self).__init__(rdclass, rdtype)
        self.mname = mname
        self.rname = rname
        self.serial = serial
        self.refresh = refresh
        self.retry = retry
        self.expire = expire
        self.minimum = minimum

    def to_text(self, origin=None, relativize=True, **kw):
        # Master-file rendering: "MNAME RNAME serial refresh retry expire
        # minimum".
        mname = self.mname.choose_relativity(origin, relativize)
        rname = self.rname.choose_relativity(origin, relativize)
        return '%s %s %d %d %d %d %d' % (
            mname, rname, self.serial, self.refresh, self.retry,
            self.expire, self.minimum )

    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
        # Parse the two names and five counters from a master-file tokenizer.
        mname = tok.get_name()
        rname = tok.get_name()
        mname = mname.choose_relativity(origin, relativize)
        rname = rname.choose_relativity(origin, relativize)
        serial = tok.get_uint32()
        refresh = tok.get_ttl()
        retry = tok.get_ttl()
        expire = tok.get_ttl()
        minimum = tok.get_ttl()
        tok.get_eol()
        return cls(rdclass, rdtype, mname, rname, serial, refresh, retry,
                   expire, minimum )

    from_text = classmethod(from_text)

    def to_wire(self, file, compress = None, origin = None):
        # Names may be compressed; the five 32-bit counters follow in
        # network byte order.
        self.mname.to_wire(file, compress, origin)
        self.rname.to_wire(file, compress, origin)
        five_ints = struct.pack('!IIIII', self.serial, self.refresh,
                                self.retry, self.expire, self.minimum)
        file.write(five_ints)

    def to_digestable(self, origin = None):
        # Canonical (uncompressed) wire form, as used for digests.
        return self.mname.to_digestable(origin) + \
            self.rname.to_digestable(origin) + \
            struct.pack('!IIIII', self.serial, self.refresh,
                        self.retry, self.expire, self.minimum)

    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
        (mname, cused) = dns.name.from_wire(wire[: current + rdlen], current)
        current += cused
        rdlen -= cused
        (rname, cused) = dns.name.from_wire(wire[: current + rdlen], current)
        current += cused
        rdlen -= cused
        # After the two names exactly 20 bytes (five uint32s) must remain.
        if rdlen != 20:
            raise dns.exception.FormError
        five_ints = struct.unpack('!IIIII',
                                  wire[current : current + rdlen])
        if not origin is None:
            mname = mname.relativize(origin)
            rname = rname.relativize(origin)
        return cls(rdclass, rdtype, mname, rname,
                   five_ints[0], five_ints[1], five_ints[2], five_ints[3],
                   five_ints[4])

    from_wire = classmethod(from_wire)

    def choose_relativity(self, origin = None, relativize = True):
        self.mname = self.mname.choose_relativity(origin, relativize)
        self.rname = self.rname.choose_relativity(origin, relativize)

    def _cmp(self, other):
        # Three-way comparison (Python 2 cmp-style): names first, then the
        # packed counters so numeric fields compare in fixed-width order.
        v = cmp(self.mname, other.mname)
        if v == 0:
            v = cmp(self.rname, other.rname)
            if v == 0:
                self_ints = struct.pack('!IIIII', self.serial, self.refresh,
                                        self.retry, self.expire, self.minimum)
                other_ints = struct.pack('!IIIII', other.serial, other.refresh,
                                         other.retry, other.expire,
                                         other.minimum)
                v = cmp(self_ints, other_ints)
        return v
| gpl-2.0 |
sclabs/sitestatus-nonrel | django/db/models/aggregates.py | 521 | 2101 | """
Classes to represent the definitions of aggregate functions.
"""
class Aggregate(object):
    """
    Default Aggregate definition.
    """
    def __init__(self, lookup, **extra):
        """Instantiate a new aggregate.

         * lookup is the field on which the aggregate operates.
         * extra is a dictionary of additional data to provide for the
           aggregate definition

        Also utilizes the class variables:
         * name, the identifier for this aggregate function.
        """
        self.lookup = lookup
        self.extra = extra

    def _default_alias(self):
        # e.g. "author__count" for Count('author').
        return '%s__%s' % (self.lookup, self.name.lower())
    default_alias = property(_default_alias)

    def add_to_query(self, query, alias, col, source, is_summary):
        """Add the aggregate to the nominated query.

        This method is used to convert the generic Aggregate definition into a
        backend-specific definition.

         * query is the backend-specific query instance to which the aggregate
           is to be added.
         * alias is the output name under which the backend aggregate is
           registered on the query.
         * col is a column reference describing the subject field
           of the aggregate. It can be an alias, or a tuple describing
           a table and column name.
         * source is the underlying field or aggregate definition for
           the column reference. If the aggregate is not an ordinal or
           computed type, this reference is used to determine the coerced
           output type of the aggregate.
         * is_summary is a boolean that is set True if the aggregate is a
           summary value rather than an annotation.
        """
        # Resolve the backend class of the same name and store it under the
        # requested alias.
        klass = getattr(query.aggregates_module, self.name)
        aggregate = klass(col, source=source, is_summary=is_summary, **self.extra)
        query.aggregates[alias] = aggregate
# Concrete aggregate definitions. Each class only supplies `name`, which
# Aggregate.add_to_query resolves to the identically-named class in the
# backend's aggregates_module.
class Avg(Aggregate):
    name = 'Avg'

class Count(Aggregate):
    name = 'Count'

class Max(Aggregate):
    name = 'Max'

class Min(Aggregate):
    name = 'Min'

class StdDev(Aggregate):
    name = 'StdDev'

class Sum(Aggregate):
    name = 'Sum'

class Variance(Aggregate):
    name = 'Variance'
| bsd-3-clause |
faust64/ansible | lib/ansible/modules/network/panos/panos_nat_policy.py | 25 | 10517 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_nat_policy
short_description: create a policy NAT rule
description:
- Create a policy nat rule. Keep in mind that we can either end up configuring source NAT, destination NAT, or both. Instead of splitting it into two we will make a fair attempt to determine which one the user wants.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device
required: true
password:
description:
- password for authentication
required: true
username:
description:
- username for authentication
required: false
default: "admin"
rule_name:
description:
- name of the SNAT rule
required: true
from_zone:
description:
- list of source zones
required: true
to_zone:
description:
- destination zone
required: true
source:
description:
- list of source addresses
required: false
default: ["any"]
destination:
description:
- list of destination addresses
required: false
default: ["any"]
service:
description:
- service
required: false
default: "any"
snat_type:
description:
- type of source translation
required: false
default: None
snat_address:
description:
- snat translated address
required: false
default: None
snat_interface:
description:
- snat interface
required: false
default: None
snat_interface_address:
description:
- snat interface address
required: false
default: None
snat_bidirectional:
description:
- bidirectional flag
required: false
default: "false"
dnat_address:
description:
- dnat translated address
required: false
default: None
dnat_port:
description:
- dnat translated port
required: false
default: None
override:
description:
- attempt to override rule if one with the same name already exists
required: false
default: "false"
commit:
description:
- commit if changed
required: false
default: true
'''
EXAMPLES = '''
# Create a source and destination nat rule
- name: create nat SSH221 rule for 10.0.1.101
panos_nat:
ip_address: "192.168.1.1"
password: "admin"
rule_name: "Web SSH"
from_zone: ["external"]
to_zone: "external"
source: ["any"]
destination: ["10.0.0.100"]
service: "service-tcp-221"
snat_type: "dynamic-ip-and-port"
snat_interface: "ethernet1/2"
dnat_address: "10.0.1.101"
dnat_port: "22"
commit: False
'''
RETURN = '''
# Default return values
'''
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import get_exception
try:
import pan.xapi
from pan.xapi import PanXapiError
HAS_LIB = True
except ImportError:
HAS_LIB = False
_NAT_XPATH = "/config/devices/entry[@name='localhost.localdomain']" + \
"/vsys/entry[@name='vsys1']" + \
"/rulebase/nat/rules/entry[@name='%s']"
def nat_rule_exists(xapi, rule_name):
    """Return True if a NAT rule named *rule_name* exists on the firewall."""
    xapi.get(_NAT_XPATH % rule_name)
    # The GET populates element_root; a matching <entry> means the rule is
    # already configured.
    return xapi.element_root.find('.//entry') is not None
def dnat_xml(m, dnat_address, dnat_port):
    """Render the <destination-translation> element for a NAT rule.

    m: the AnsibleModule (unused here; kept for signature symmetry with
    snat_xml). Returns None when neither address nor port is given, i.e.
    no destination NAT is requested.
    """
    if dnat_address is None and dnat_port is None:
        return None
    body = ''
    if dnat_address is not None:
        body += "<translated-address>%s</translated-address>" % dnat_address
    if dnat_port is not None:
        body += "<translated-port>%s</translated-port>" % dnat_port
    return "<destination-translation>%s</destination-translation>" % body
def snat_xml(m, snat_type, snat_address, snat_interface,
             snat_interface_address, snat_bidirectional):
    """Render the <source-translation> element for a NAT rule.

    m: AnsibleModule, used only to abort via fail_json() on invalid input.
    snat_type: 'static-ip' or 'dynamic-ip-and-port'; anything else fails.
    snat_address: translated address (a single value for static-ip, a list
        of pool members for dynamic-ip-and-port).
    snat_interface / snat_interface_address: egress interface (and optional
        IP) for dynamic-ip-and-port translation.
    snat_bidirectional: static-ip only; truthy emits <bi-directional>yes.
    Returns the XML as a single string.
    """
    if snat_type == 'static-ip':
        if snat_address is None:
            m.fail_json(msg="snat_address should be speicified "
                            "for snat_type static-ip")
        exml = ["<source-translation>", "<static-ip>"]
        if snat_bidirectional:
            exml.append('<bi-directional>%s</bi-directional>' % 'yes')
        else:
            exml.append('<bi-directional>%s</bi-directional>' % 'no')
        exml.append('<translated-address>%s</translated-address>' %
                    snat_address)
        exml.append('</static-ip>')
        exml.append('</source-translation>')
    elif snat_type == 'dynamic-ip-and-port':
        exml = ["<source-translation>",
                "<dynamic-ip-and-port>"]
        # Interface-based translation takes precedence over an explicit
        # address pool when both are supplied.
        if snat_interface is not None:
            exml = exml + [
                "<interface-address>",
                "<interface>%s</interface>" % snat_interface]
            if snat_interface_address is not None:
                exml.append("<ip>%s</ip>" % snat_interface_address)
            exml.append("</interface-address>")
        elif snat_address is not None:
            exml.append("<translated-address>")
            for t in snat_address:
                exml.append("<member>%s</member>" % t)
            exml.append("</translated-address>")
        else:
            m.fail_json(msg="no snat_interface or snat_address "
                            "specified for snat_type dynamic-ip-and-port")
        exml.append('</dynamic-ip-and-port>')
        exml.append('</source-translation>')
    else:
        m.fail_json(msg="unknown snat_type %s" % snat_type)
    return ''.join(exml)
def add_nat(xapi, module, rule_name, from_zone, to_zone,
            source, destination, service, dnatxml=None, snatxml=None):
    """Create (or overwrite) the NAT rule *rule_name* on the firewall.

    dnatxml / snatxml are pre-rendered XML snippets (see dnat_xml and
    snat_xml) and are emitted first when present. Always returns True.
    """
    def members(tag, values):
        # Render <tag><member>v</member>...</tag> for a list of values.
        inner = ''.join("<member>%s</member>" % v for v in values)
        return "<%s>%s</%s>" % (tag, inner, tag)

    parts = []
    if dnatxml:
        parts.append(dnatxml)
    if snatxml:
        parts.append(snatxml)
    parts.append(members('to', [to_zone]))
    parts.append(members('from', from_zone))
    parts.append(members('source', source))
    parts.append(members('destination', destination))
    parts.append("<service>%s</service>" % service)
    parts.append("<nat-type>ipv4</nat-type>")

    xapi.set(xpath=_NAT_XPATH % rule_name, element=''.join(parts))
    return True
def main():
    """Module entry point: read parameters, build the NAT rule, apply it."""
    argument_spec = dict(
        ip_address=dict(required=True),
        password=dict(required=True, no_log=True),
        username=dict(default='admin'),
        rule_name=dict(required=True),
        from_zone=dict(type='list', required=True),
        to_zone=dict(required=True),
        source=dict(type='list', default=["any"]),
        destination=dict(type='list', default=["any"]),
        service=dict(default="any"),
        snat_type=dict(),
        snat_address=dict(),
        snat_interface=dict(),
        snat_interface_address=dict(),
        snat_bidirectional=dict(default=False),
        dnat_address=dict(),
        dnat_port=dict(),
        override=dict(type='bool', default=False),
        commit=dict(type='bool', default=True)
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    if not HAS_LIB:
        module.fail_json(msg='pan-python is required for this module')

    ip_address = module.params["ip_address"]
    password = module.params["password"]
    username = module.params['username']

    # Open an API session to the firewall.
    xapi = pan.xapi.PanXapi(
        hostname=ip_address,
        api_username=username,
        api_password=password
    )

    rule_name = module.params['rule_name']
    from_zone = module.params['from_zone']
    to_zone = module.params['to_zone']
    source = module.params['source']
    destination = module.params['destination']
    service = module.params['service']
    snat_type = module.params['snat_type']
    snat_address = module.params['snat_address']
    snat_interface = module.params['snat_interface']
    snat_interface_address = module.params['snat_interface_address']
    snat_bidirectional = module.params['snat_bidirectional']
    dnat_address = module.params['dnat_address']
    dnat_port = module.params['dnat_port']
    commit = module.params['commit']

    override = module.params["override"]
    # Unless override is requested, an existing rule is left untouched.
    if not override and nat_rule_exists(xapi, rule_name):
        module.exit_json(changed=False, msg="rule exists")

    try:
        changed = add_nat(
            xapi,
            module,
            rule_name,
            from_zone,
            to_zone,
            source,
            destination,
            service,
            dnatxml=dnat_xml(module, dnat_address, dnat_port),
            snatxml=snat_xml(module, snat_type, snat_address,
                             snat_interface, snat_interface_address,
                             snat_bidirectional)
        )

        if changed and commit:
            # Synchronous commit: wait until the firewall finishes applying.
            xapi.commit(cmd="<commit></commit>", sync=True, interval=1)

        module.exit_json(changed=changed, msg="okey dokey")
    except PanXapiError:
        exc = get_exception()
        module.fail_json(msg=exc.message)


if __name__ == '__main__':
    main()
| gpl-3.0 |
wskplho/sl4a | python/xmpppy/xmpp/auth.py | 196 | 15633 | ## auth.py
##
## Copyright (C) 2003-2005 Alexey "Snake" Nezhdanov
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
# $Id: auth.py,v 1.41 2008/09/13 21:45:21 normanr Exp $
"""
Provides library with all Non-SASL and SASL authentication mechanisms.
Can be used both for client and transport authentication.
"""
from protocol import *
from client import PlugIn
import base64
import hashlib
import random
import re
import dispatcher
import sha
import md5
def HH(some):
    """Return the hexadecimal MD5 digest of `some`.

    Used for the DIGEST-MD5 response computation (RFC 2831). Uses hashlib
    instead of the `md5` module, which is deprecated since Python 2.5 and
    removed in Python 3; the digests are identical.
    """
    return hashlib.md5(some).hexdigest()
def H(some):
    """Return the raw (binary) MD5 digest of `some`."""
    return hashlib.md5(some).digest()
def C(some):
    """Join the items of `some` with ':' (RFC 2831 concatenation helper)."""
    return ':'.join(some)
class NonSASL(PlugIn):
    """ Implements old Non-SASL (JEP-0078) authentication used in jabberd1.4 and transport authentication."""
    def __init__(self,user,password,resource):
        """ Caches username, password and resource for auth. """
        PlugIn.__init__(self)
        self.DBG_LINE='gen_auth'
        self.user=user
        self.password=password
        # An empty resource switches plugin() into component-handshake mode.
        self.resource=resource
    def plugin(self,owner):
        """ Determine the best auth method (digest/0k/plain) and use it for auth.
            Returns used method name on success. Used internally. """
        if not self.resource: return self.authComponent(owner)
        self.DEBUG('Querying server about possible auth methods','start')
        resp=owner.Dispatcher.SendAndWaitForResponse(Iq('get',NS_AUTH,payload=[Node('username',payload=[self.user])]))
        if not isResultNode(resp):
            self.DEBUG('No result node arrived! Aborting...','error')
            return
        iq=Iq(typ='set',node=resp)
        query=iq.getTag('query')
        query.setTagData('username',self.user)
        query.setTagData('resource',self.resource)
        if query.getTag('digest'):
            # Digest auth (JEP-0078): SHA1 of stream id + password; remove the
            # plaintext <password/> element the server's form may have included.
            self.DEBUG("Performing digest authentication",'ok')
            query.setTagData('digest',sha.new(owner.Dispatcher.Stream._document_attrs['id']+self.password).hexdigest())
            if query.getTag('password'): query.delChild('password')
            method='digest'
        elif query.getTag('token'):
            # Zero-knowledge auth: start from SHA1(SHA1(password)+token) and
            # re-hash `sequence` more times before sending.
            token=query.getTagData('token')
            seq=query.getTagData('sequence')
            self.DEBUG("Performing zero-k authentication",'ok')
            hash = sha.new(sha.new(self.password).hexdigest()+token).hexdigest()
            for foo in xrange(int(seq)): hash = sha.new(hash).hexdigest()
            query.setTagData('hash',hash)
            method='0k'
        else:
            # Last resort: send the password in clear text.
            self.DEBUG("Sequre methods unsupported, performing plain text authentication",'warn')
            query.setTagData('password',self.password)
            method='plain'
        resp=owner.Dispatcher.SendAndWaitForResponse(iq)
        if isResultNode(resp):
            self.DEBUG('Sucessfully authenticated with remove host.','ok')
            owner.User=self.user
            owner.Resource=self.resource
            owner._registered_name=owner.User+'@'+owner.Server+'/'+owner.Resource
            return method
        self.DEBUG('Authentication failed!','error')
    def authComponent(self,owner):
        """ Authenticate component. Send handshake stanza and wait for result. Returns "ok" on success. """
        # handshake: 0 = pending, 1 = accepted, -1 = rejected (set by handshakeHandler).
        self.handshake=0
        owner.send(Node(NS_COMPONENT_ACCEPT+' handshake',payload=[sha.new(owner.Dispatcher.Stream._document_attrs['id']+self.password).hexdigest()]))
        owner.RegisterHandler('handshake',self.handshakeHandler,xmlns=NS_COMPONENT_ACCEPT)
        while not self.handshake:
            self.DEBUG("waiting on handshake",'notify')
            owner.Process(1)
        owner._registered_name=self.user
        # handshake+1 is falsy only for -1 (failure), so 'ok' is returned on success.
        if self.handshake+1: return 'ok'
    def handshakeHandler(self,disp,stanza):
        """ Handler for registering in dispatcher for accepting transport authentication. """
        if stanza.getName()=='handshake': self.handshake=1
        else: self.handshake=-1
class SASL(PlugIn):
    """ Implements SASL authentication. """
    def __init__(self,username,password):
        # username may be None, which enables the ANONYMOUS mechanism below.
        PlugIn.__init__(self)
        self.username=username
        self.password=password
    def plugin(self,owner):
        # A 'version' attribute on the stream header marks an XMPP 1.0
        # (SASL-capable) server; without it SASL cannot be used at all.
        if not self._owner.Dispatcher.Stream._document_attrs.has_key('version'): self.startsasl='not-supported'
        elif self._owner.Dispatcher.Stream.features:
            try: self.FeaturesHandler(self._owner.Dispatcher,self._owner.Dispatcher.Stream.features)
            except NodeProcessed: pass
        else: self.startsasl=None
    def auth(self):
        """ Start authentication. Result can be obtained via "SASL.startsasl" attribute and will be
            either "success" or "failure". Note that successfull auth will take at least
            two Dispatcher.Process() calls. """
        # If plugin() already started (or ruled out) SASL there is nothing to do;
        # otherwise begin now or wait for the <features/> stanza.
        if self.startsasl: pass
        elif self._owner.Dispatcher.Stream.features:
            try: self.FeaturesHandler(self._owner.Dispatcher,self._owner.Dispatcher.Stream.features)
            except NodeProcessed: pass
        else: self._owner.RegisterHandler('features',self.FeaturesHandler,xmlns=NS_STREAMS)
    def plugout(self):
        """ Remove SASL handlers from owner's dispatcher. Used internally. """
        if self._owner.__dict__.has_key('features'): self._owner.UnregisterHandler('features',self.FeaturesHandler,xmlns=NS_STREAMS)
        if self._owner.__dict__.has_key('challenge'): self._owner.UnregisterHandler('challenge',self.SASLHandler,xmlns=NS_SASL)
        if self._owner.__dict__.has_key('failure'): self._owner.UnregisterHandler('failure',self.SASLHandler,xmlns=NS_SASL)
        if self._owner.__dict__.has_key('success'): self._owner.UnregisterHandler('success',self.SASLHandler,xmlns=NS_SASL)
    def FeaturesHandler(self,conn,feats):
        """ Used to determine if server supports SASL auth. Used internally. """
        if not feats.getTag('mechanisms',namespace=NS_SASL):
            self.startsasl='not-supported'
            self.DEBUG('SASL not supported by server','error')
            return
        mecs=[]
        for mec in feats.getTag('mechanisms',namespace=NS_SASL).getTags('mechanism'):
            mecs.append(mec.getData())
        self._owner.RegisterHandler('challenge',self.SASLHandler,xmlns=NS_SASL)
        self._owner.RegisterHandler('failure',self.SASLHandler,xmlns=NS_SASL)
        self._owner.RegisterHandler('success',self.SASLHandler,xmlns=NS_SASL)
        # Mechanism preference: ANONYMOUS (only with no username), then
        # DIGEST-MD5, then PLAIN (credentials base64-encoded, not encrypted).
        if "ANONYMOUS" in mecs and self.username == None:
            node=Node('auth',attrs={'xmlns':NS_SASL,'mechanism':'ANONYMOUS'})
        elif "DIGEST-MD5" in mecs:
            node=Node('auth',attrs={'xmlns':NS_SASL,'mechanism':'DIGEST-MD5'})
        elif "PLAIN" in mecs:
            sasl_data='%s\x00%s\x00%s'%(self.username+'@'+self._owner.Server,self.username,self.password)
            node=Node('auth',attrs={'xmlns':NS_SASL,'mechanism':'PLAIN'},payload=[base64.encodestring(sasl_data).replace('\r','').replace('\n','')])
        else:
            self.startsasl='failure'
            self.DEBUG('I can only use DIGEST-MD5 and PLAIN mecanisms.','error')
            return
        self.startsasl='in-process'
        self._owner.send(node.__str__())
        raise NodeProcessed
    def SASLHandler(self,conn,challenge):
        """ Perform next SASL auth step. Used internally. """
        if challenge.getNamespace()<>NS_SASL: return
        if challenge.getName()=='failure':
            self.startsasl='failure'
            try: reason=challenge.getChildren()[0]
            except: reason=challenge
            self.DEBUG('Failed SASL authentification: %s'%reason,'error')
            raise NodeProcessed
        elif challenge.getName()=='success':
            self.startsasl='success'
            self.DEBUG('Successfully authenticated with remote server.','ok')
            # After SASL success the XML stream must be restarted: swap in a
            # fresh Dispatcher but carry the registered handlers over.
            handlers=self._owner.Dispatcher.dumpHandlers()
            self._owner.Dispatcher.PlugOut()
            dispatcher.Dispatcher().PlugIn(self._owner)
            self._owner.Dispatcher.restoreHandlers(handlers)
            self._owner.User=self.username
            raise NodeProcessed
        ########################################3333
        # DIGEST-MD5 challenge: decode and parse the key=value[,key=value...]
        # pairs (values may be quoted) into `chal`.
        incoming_data=challenge.getData()
        chal={}
        data=base64.decodestring(incoming_data)
        self.DEBUG('Got challenge:'+data,'ok')
        for pair in re.findall('(\w+\s*=\s*(?:(?:"[^"]+")|(?:[^,]+)))',data):
            key,value=[x.strip() for x in pair.split('=', 1)]
            if value[:1]=='"' and value[-1:]=='"': value=value[1:-1]
            chal[key]=value
        if chal.has_key('qop') and 'auth' in [x.strip() for x in chal['qop'].split(',')]:
            # Build the digest-response per RFC 2831.
            resp={}
            resp['username']=self.username
            resp['realm']=self._owner.Server
            resp['nonce']=chal['nonce']
            cnonce=''
            for i in range(7):
                cnonce+=hex(int(random.random()*65536*4096))[2:]
            resp['cnonce']=cnonce
            resp['nc']=('00000001')
            resp['qop']='auth'
            resp['digest-uri']='xmpp/'+self._owner.Server
            # A1/A2 and the final response hash per RFC 2831 section 2.1.2.1.
            A1=C([H(C([resp['username'],resp['realm'],self.password])),resp['nonce'],resp['cnonce']])
            A2=C(['AUTHENTICATE',resp['digest-uri']])
            response= HH(C([HH(A1),resp['nonce'],resp['nc'],resp['cnonce'],resp['qop'],HH(A2)]))
            resp['response']=response
            resp['charset']='utf-8'
            sasl_data=''
            for key in ['charset','username','realm','nonce','nc','cnonce','digest-uri','response','qop']:
                if key in ['nc','qop','response','charset']: sasl_data+="%s=%s,"%(key,resp[key])
                else: sasl_data+='%s="%s",'%(key,resp[key])
            ########################################3333
            # sasl_data[:-1] drops the trailing comma added by the loop above.
            node=Node('response',attrs={'xmlns':NS_SASL},payload=[base64.encodestring(sasl_data[:-1]).replace('\r','').replace('\n','')])
            self._owner.send(node.__str__())
        elif chal.has_key('rspauth'): self._owner.send(Node('response',attrs={'xmlns':NS_SASL}).__str__())
        else:
            self.startsasl='failure'
            self.DEBUG('Failed SASL authentification: unknown challenge','error')
        raise NodeProcessed
class Bind(PlugIn):
    """ Bind some JID to the current connection to allow router know of our location."""
    def __init__(self):
        PlugIn.__init__(self)
        self.DBG_LINE='bind'
        # bound: None = not yet determined, 'failure' = unsupported,
        # list = JIDs bound so far (set by FeaturesHandler / Bind).
        self.bound=None
    def plugin(self,owner):
        """ Start resource binding, if allowed at this time. Used internally. """
        if self._owner.Dispatcher.Stream.features:
            try: self.FeaturesHandler(self._owner.Dispatcher,self._owner.Dispatcher.Stream.features)
            except NodeProcessed: pass
        else: self._owner.RegisterHandler('features',self.FeaturesHandler,xmlns=NS_STREAMS)
    def plugout(self):
        """ Remove Bind handler from owner's dispatcher. Used internally. """
        self._owner.UnregisterHandler('features',self.FeaturesHandler,xmlns=NS_STREAMS)
    def FeaturesHandler(self,conn,feats):
        """ Determine if server supports resource binding and set some internal attributes accordingly. """
        if not feats.getTag('bind',namespace=NS_BIND):
            self.bound='failure'
            self.DEBUG('Server does not requested binding.','error')
            return
        # session: 1 = server offers session establishment, -1 = it does not.
        if feats.getTag('session',namespace=NS_SESSION): self.session=1
        else: self.session=-1
        self.bound=[]
    def Bind(self,resource=None):
        """ Perform binding. Use provided resource name or random (if not provided). """
        # Block until the stream features have been processed (or the stream dies).
        while self.bound is None and self._owner.Process(1): pass
        if resource: resource=[Node('resource',payload=[resource])]
        else: resource=[]
        resp=self._owner.SendAndWaitForResponse(Protocol('iq',typ='set',payload=[Node('bind',attrs={'xmlns':NS_BIND},payload=resource)]))
        if isResultNode(resp):
            self.bound.append(resp.getTag('bind').getTagData('jid'))
            self.DEBUG('Successfully bound %s.'%self.bound[-1],'ok')
            jid=JID(resp.getTag('bind').getTagData('jid'))
            self._owner.User=jid.getNode()
            self._owner.Resource=jid.getResource()
            # Follow up with session establishment (RFC 3921 style).
            resp=self._owner.SendAndWaitForResponse(Protocol('iq',typ='set',payload=[Node('session',attrs={'xmlns':NS_SESSION})]))
            if isResultNode(resp):
                self.DEBUG('Successfully opened session.','ok')
                self.session=1
                return 'ok'
            else:
                self.DEBUG('Session open failed.','error')
                self.session=0
        elif resp: self.DEBUG('Binding failed: %s.'%resp.getTag('error'),'error')
        else:
            self.DEBUG('Binding failed: timeout expired.','error')
            return ''
class ComponentBind(PlugIn):
    """ ComponentBind some JID to the current connection to allow router know of our location."""
    def __init__(self, sasl):
        PlugIn.__init__(self)
        self.DBG_LINE='bind'
        self.bound=None
        # Set when we registered the features handler ourselves, so plugout()
        # knows whether there is anything to unregister.
        self.needsUnregister=None
        self.sasl = sasl
    def plugin(self,owner):
        """ Start resource binding, if allowed at this time. Used internally. """
        if not self.sasl:
            # Pre-SASL component protocol has no feature-driven binding step.
            self.bound=[]
            return
        if self._owner.Dispatcher.Stream.features:
            try: self.FeaturesHandler(self._owner.Dispatcher,self._owner.Dispatcher.Stream.features)
            except NodeProcessed: pass
        else:
            self._owner.RegisterHandler('features',self.FeaturesHandler,xmlns=NS_STREAMS)
            self.needsUnregister=1
    def plugout(self):
        """ Remove ComponentBind handler from owner's dispatcher. Used internally. """
        if self.needsUnregister:
            self._owner.UnregisterHandler('features',self.FeaturesHandler,xmlns=NS_STREAMS)
    def FeaturesHandler(self,conn,feats):
        """ Determine if server supports resource binding and set some internal attributes accordingly. """
        if not feats.getTag('bind',namespace=NS_BIND):
            self.bound='failure'
            self.DEBUG('Server does not requested binding.','error')
            return
        if feats.getTag('session',namespace=NS_SESSION): self.session=1
        else: self.session=-1
        self.bound=[]
    def Bind(self,domain=None):
        """ Perform binding. Use provided domain name (if not provided). """
        # Block until the stream features have been processed (or the stream dies).
        while self.bound is None and self._owner.Process(1): pass
        if self.sasl:
            xmlns = NS_COMPONENT_1
        else:
            xmlns = None
        self.bindresponse = None
        # ttl counts down Process() iterations so a missing reply times out.
        ttl = dispatcher.DefaultTimeout
        self._owner.RegisterHandler('bind',self.BindHandler,xmlns=xmlns)
        self._owner.send(Protocol('bind',attrs={'name':domain},xmlns=NS_COMPONENT_1))
        while self.bindresponse is None and self._owner.Process(1) and ttl > 0: ttl-=1
        self._owner.UnregisterHandler('bind',self.BindHandler,xmlns=xmlns)
        resp=self.bindresponse
        if resp and resp.getAttr('error'):
            self.DEBUG('Binding failed: %s.'%resp.getAttr('error'),'error')
        elif resp:
            self.DEBUG('Successfully bound.','ok')
            return 'ok'
        else:
            self.DEBUG('Binding failed: timeout expired.','error')
            return ''
    def BindHandler(self,conn,bind):
        # Store the router's reply; Bind() polls self.bindresponse for it.
        self.bindresponse = bind
        pass
| apache-2.0 |
fitermay/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/geos/geometry.py | 230 | 24281 | """
This module contains the 'base' GEOSGeometry object -- all GEOS Geometries
inherit from this object.
"""
# Python, ctypes and types dependencies.
import re
import warnings
from ctypes import addressof, byref, c_double, c_size_t
# super-class for mutable list behavior
from django.contrib.gis.geos.mutable_list import ListMixin
# GEOS-related dependencies.
from django.contrib.gis.geos.base import GEOSBase, gdal
from django.contrib.gis.geos.coordseq import GEOSCoordSeq
from django.contrib.gis.geos.error import GEOSException, GEOSIndexError
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOS_PREPARE
from django.contrib.gis.geos.mutable_list import ListMixin
# All other functions in this module come from the ctypes
# prototypes module -- which handles all interaction with
# the underlying GEOS library.
from django.contrib.gis.geos import prototypes as capi
# These functions provide access to a thread-local instance
# of their corresponding GEOS I/O class.
from django.contrib.gis.geos.prototypes.io import wkt_r, wkt_w, wkb_r, wkb_w, ewkb_w, ewkb_w3d
# For recognizing geometry input.
from django.contrib.gis.geometry.regex import hex_regex, wkt_regex, json_regex
class GEOSGeometry(GEOSBase, ListMixin):
"A class that, generally, encapsulates a GEOS geometry."
# Raise GEOSIndexError instead of plain IndexError
# (see ticket #4740 and GEOSIndexError docstring)
_IndexError = GEOSIndexError
ptr_type = GEOM_PTR
#### Python 'magic' routines ####
def __init__(self, geo_input, srid=None):
"""
The base constructor for GEOS geometry objects, and may take the
following inputs:
* strings:
- WKT
- HEXEWKB (a PostGIS-specific canonical form)
- GeoJSON (requires GDAL)
* buffer:
- WKB
The `srid` keyword is used to specify the Source Reference Identifier
(SRID) number for this Geometry. If not set, the SRID will be None.
"""
if isinstance(geo_input, basestring):
if isinstance(geo_input, unicode):
# Encoding to ASCII, WKT or HEXEWKB doesn't need any more.
geo_input = geo_input.encode('ascii')
wkt_m = wkt_regex.match(geo_input)
if wkt_m:
# Handling WKT input.
if wkt_m.group('srid'): srid = int(wkt_m.group('srid'))
g = wkt_r().read(wkt_m.group('wkt'))
elif hex_regex.match(geo_input):
# Handling HEXEWKB input.
g = wkb_r().read(geo_input)
elif gdal.GEOJSON and json_regex.match(geo_input):
# Handling GeoJSON input.
g = wkb_r().read(gdal.OGRGeometry(geo_input).wkb)
else:
raise ValueError('String or unicode input unrecognized as WKT EWKT, and HEXEWKB.')
elif isinstance(geo_input, GEOM_PTR):
# When the input is a pointer to a geomtry (GEOM_PTR).
g = geo_input
elif isinstance(geo_input, buffer):
# When the input is a buffer (WKB).
g = wkb_r().read(geo_input)
elif isinstance(geo_input, GEOSGeometry):
g = capi.geom_clone(geo_input.ptr)
else:
# Invalid geometry type.
raise TypeError('Improper geometry input type: %s' % str(type(geo_input)))
if bool(g):
# Setting the pointer object with a valid pointer.
self.ptr = g
else:
raise GEOSException('Could not initialize GEOS Geometry with given input.')
# Post-initialization setup.
self._post_init(srid)
def _post_init(self, srid):
"Helper routine for performing post-initialization setup."
# Setting the SRID, if given.
if srid and isinstance(srid, int): self.srid = srid
# Setting the class type (e.g., Point, Polygon, etc.)
self.__class__ = GEOS_CLASSES[self.geom_typeid]
# Setting the coordinate sequence for the geometry (will be None on
# geometries that do not have coordinate sequences)
self._set_cs()
def __del__(self):
"""
Destroys this Geometry; in other words, frees the memory used by the
GEOS C++ object.
"""
if self._ptr: capi.destroy_geom(self._ptr)
def __copy__(self):
"""
Returns a clone because the copy of a GEOSGeometry may contain an
invalid pointer location if the original is garbage collected.
"""
return self.clone()
def __deepcopy__(self, memodict):
"""
The `deepcopy` routine is used by the `Node` class of django.utils.tree;
thus, the protocol routine needs to be implemented to return correct
copies (clones) of these GEOS objects, which use C pointers.
"""
return self.clone()
def __str__(self):
"WKT is used for the string representation."
return self.wkt
def __repr__(self):
"Short-hand representation because WKT may be very large."
return '<%s object at %s>' % (self.geom_type, hex(addressof(self.ptr)))
# Pickling support
def __getstate__(self):
# The pickled state is simply a tuple of the WKB (in string form)
# and the SRID.
return str(self.wkb), self.srid
def __setstate__(self, state):
# Instantiating from the tuple state that was pickled.
wkb, srid = state
ptr = wkb_r().read(buffer(wkb))
if not ptr: raise GEOSException('Invalid Geometry loaded from pickled state.')
self.ptr = ptr
self._post_init(srid)
# Comparison operators
def __eq__(self, other):
"""
Equivalence testing, a Geometry may be compared with another Geometry
or a WKT representation.
"""
if isinstance(other, basestring):
return self.wkt == other
elif isinstance(other, GEOSGeometry):
return self.equals_exact(other)
else:
return False
    def __ne__(self, other):
        "The not equals operator."
        # Delegates to == (which handles WKT strings and geometries) and
        # negates the result, keeping the two operators consistent.
        return not (self == other)
### Geometry set-like operations ###
# Thanks to Sean Gillies for inspiration:
# http://lists.gispython.org/pipermail/community/2007-July/001034.html
# g = g1 | g2
def __or__(self, other):
"Returns the union of this Geometry and the other."
return self.union(other)
# g = g1 & g2
def __and__(self, other):
"Returns the intersection of this Geometry and the other."
return self.intersection(other)
# g = g1 - g2
def __sub__(self, other):
"Return the difference this Geometry and the other."
return self.difference(other)
# g = g1 ^ g2
def __xor__(self, other):
"Return the symmetric difference of this Geometry and the other."
return self.sym_difference(other)
#### Coordinate Sequence Routines ####
@property
def has_cs(self):
"Returns True if this Geometry has a coordinate sequence, False if not."
# Only these geometries are allowed to have coordinate sequences.
if isinstance(self, (Point, LineString, LinearRing)):
return True
else:
return False
def _set_cs(self):
"Sets the coordinate sequence for this Geometry."
if self.has_cs:
self._cs = GEOSCoordSeq(capi.get_cs(self.ptr), self.hasz)
else:
self._cs = None
@property
def coord_seq(self):
"Returns a clone of the coordinate sequence for this Geometry."
if self.has_cs:
return self._cs.clone()
#### Geometry Info ####
@property
def geom_type(self):
"Returns a string representing the Geometry type, e.g. 'Polygon'"
return capi.geos_type(self.ptr)
@property
def geom_typeid(self):
"Returns an integer representing the Geometry type."
return capi.geos_typeid(self.ptr)
@property
def num_geom(self):
"Returns the number of geometries in the Geometry."
return capi.get_num_geoms(self.ptr)
@property
def num_coords(self):
"Returns the number of coordinates in the Geometry."
return capi.get_num_coords(self.ptr)
@property
def num_points(self):
"Returns the number points, or coordinates, in the Geometry."
return self.num_coords
@property
def dims(self):
"Returns the dimension of this Geometry (0=point, 1=line, 2=surface)."
return capi.get_dims(self.ptr)
def normalize(self):
"Converts this Geometry to normal form (or canonical form)."
return capi.geos_normalize(self.ptr)
#### Unary predicates ####
@property
def empty(self):
"""
Returns a boolean indicating whether the set of points in this Geometry
are empty.
"""
return capi.geos_isempty(self.ptr)
@property
def hasz(self):
"Returns whether the geometry has a 3D dimension."
return capi.geos_hasz(self.ptr)
@property
def ring(self):
"Returns whether or not the geometry is a ring."
return capi.geos_isring(self.ptr)
@property
def simple(self):
"Returns false if the Geometry not simple."
return capi.geos_issimple(self.ptr)
@property
def valid(self):
"This property tests the validity of this Geometry."
return capi.geos_isvalid(self.ptr)
@property
def valid_reason(self):
"""
Returns a string containing the reason for any invalidity.
"""
if not GEOS_PREPARE:
raise GEOSException('Upgrade GEOS to 3.1 to get validity reason.')
return capi.geos_isvalidreason(self.ptr)
#### Binary predicates. ####
def contains(self, other):
"Returns true if other.within(this) returns true."
return capi.geos_contains(self.ptr, other.ptr)
def crosses(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*T****** (for a point and a curve,a point and an area or a line and
an area) 0******** (for two curves).
"""
return capi.geos_crosses(self.ptr, other.ptr)
def disjoint(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is FF*FF****.
"""
return capi.geos_disjoint(self.ptr, other.ptr)
def equals(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*F**FFF*.
"""
return capi.geos_equals(self.ptr, other.ptr)
def equals_exact(self, other, tolerance=0):
"""
Returns true if the two Geometries are exactly equal, up to a
specified tolerance.
"""
return capi.geos_equalsexact(self.ptr, other.ptr, float(tolerance))
def intersects(self, other):
"Returns true if disjoint returns false."
return capi.geos_intersects(self.ptr, other.ptr)
def overlaps(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*T***T** (for two points or two surfaces) 1*T***T** (for two curves).
"""
return capi.geos_overlaps(self.ptr, other.ptr)
def relate_pattern(self, other, pattern):
"""
Returns true if the elements in the DE-9IM intersection matrix for the
two Geometries match the elements in pattern.
"""
if not isinstance(pattern, basestring) or len(pattern) > 9:
raise GEOSException('invalid intersection matrix pattern')
return capi.geos_relatepattern(self.ptr, other.ptr, pattern)
def touches(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is FT*******, F**T***** or F***T****.
"""
return capi.geos_touches(self.ptr, other.ptr)
def within(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*F**F***.
"""
return capi.geos_within(self.ptr, other.ptr)
#### SRID Routines ####
def get_srid(self):
"Gets the SRID for the geometry, returns None if no SRID is set."
s = capi.geos_get_srid(self.ptr)
if s == 0: return None
else: return s
def set_srid(self, srid):
"Sets the SRID for the geometry."
capi.geos_set_srid(self.ptr, srid)
srid = property(get_srid, set_srid)
#### Output Routines ####
@property
def ewkt(self):
"""
Returns the EWKT (WKT + SRID) of the Geometry. Note that Z values
are *not* included in this representation because GEOS does not yet
support serializing them.
"""
if self.get_srid(): return 'SRID=%s;%s' % (self.srid, self.wkt)
else: return self.wkt
@property
def wkt(self):
"Returns the WKT (Well-Known Text) representation of this Geometry."
return wkt_w().write(self)
@property
def hex(self):
"""
Returns the WKB of this Geometry in hexadecimal form. Please note
that the SRID and Z values are not included in this representation
because it is not a part of the OGC specification (use the `hexewkb`
property instead).
"""
# A possible faster, all-python, implementation:
# str(self.wkb).encode('hex')
return wkb_w().write_hex(self)
@property
def hexewkb(self):
"""
Returns the EWKB of this Geometry in hexadecimal form. This is an
extension of the WKB specification that includes SRID and Z values
that are a part of this geometry.
"""
if self.hasz:
if not GEOS_PREPARE:
# See: http://trac.osgeo.org/geos/ticket/216
raise GEOSException('Upgrade GEOS to 3.1 to get valid 3D HEXEWKB.')
return ewkb_w3d().write_hex(self)
else:
return ewkb_w().write_hex(self)
@property
def json(self):
"""
Returns GeoJSON representation of this Geometry if GDAL 1.5+
is installed.
"""
if gdal.GEOJSON:
return self.ogr.json
else:
raise GEOSException('GeoJSON output only supported on GDAL 1.5+.')
geojson = json
@property
def wkb(self):
"""
Returns the WKB (Well-Known Binary) representation of this Geometry
as a Python buffer. SRID and Z values are not included, use the
`ewkb` property instead.
"""
return wkb_w().write(self)
@property
def ewkb(self):
"""
Return the EWKB representation of this Geometry as a Python buffer.
This is an extension of the WKB specification that includes any SRID
and Z values that are a part of this geometry.
"""
if self.hasz:
if not GEOS_PREPARE:
# See: http://trac.osgeo.org/geos/ticket/216
raise GEOSException('Upgrade GEOS to 3.1 to get valid 3D EWKB.')
return ewkb_w3d().write(self)
else:
return ewkb_w().write(self)
@property
def kml(self):
"Returns the KML representation of this Geometry."
gtype = self.geom_type
return '<%s>%s</%s>' % (gtype, self.coord_seq.kml, gtype)
@property
def prepared(self):
"""
Returns a PreparedGeometry corresponding to this geometry -- it is
optimized for the contains, intersects, and covers operations.
"""
if GEOS_PREPARE:
return PreparedGeometry(self)
else:
raise GEOSException('GEOS 3.1+ required for prepared geometry support.')
#### GDAL-specific output routines ####
@property
def ogr(self):
"Returns the OGR Geometry for this Geometry."
if gdal.HAS_GDAL:
if self.srid:
return gdal.OGRGeometry(self.wkb, self.srid)
else:
return gdal.OGRGeometry(self.wkb)
else:
raise GEOSException('GDAL required to convert to an OGRGeometry.')
@property
def srs(self):
"Returns the OSR SpatialReference for SRID of this Geometry."
if gdal.HAS_GDAL:
if self.srid:
return gdal.SpatialReference(self.srid)
else:
return None
else:
raise GEOSException('GDAL required to return a SpatialReference object.')
@property
def crs(self):
"Alias for `srs` property."
return self.srs
def transform(self, ct, clone=False):
"""
Requires GDAL. Transforms the geometry according to the given
transformation object, which may be an integer SRID, and WKT or
PROJ.4 string. By default, the geometry is transformed in-place and
nothing is returned. However if the `clone` keyword is set, then this
geometry will not be modified and a transformed clone will be returned
instead.
"""
srid = self.srid
if ct == srid:
# short-circuit where source & dest SRIDs match
if clone:
return self.clone()
else:
return
if (srid is None) or (srid < 0):
warnings.warn("Calling transform() with no SRID set does no transformation!",
stacklevel=2)
warnings.warn("Calling transform() with no SRID will raise GEOSException in v1.5",
FutureWarning, stacklevel=2)
return
if not gdal.HAS_GDAL:
raise GEOSException("GDAL library is not available to transform() geometry.")
# Creating an OGR Geometry, which is then transformed.
g = gdal.OGRGeometry(self.wkb, srid)
g.transform(ct)
# Getting a new GEOS pointer
ptr = wkb_r().read(g.wkb)
if clone:
# User wants a cloned transformed geometry returned.
return GEOSGeometry(ptr, srid=g.srid)
if ptr:
# Reassigning pointer, and performing post-initialization setup
# again due to the reassignment.
capi.destroy_geom(self.ptr)
self.ptr = ptr
self._post_init(g.srid)
else:
raise GEOSException('Transformed WKB was invalid.')
#### Topology Routines ####
def _topology(self, gptr):
"Helper routine to return Geometry from the given pointer."
return GEOSGeometry(gptr, srid=self.srid)
@property
def boundary(self):
"Returns the boundary as a newly allocated Geometry object."
return self._topology(capi.geos_boundary(self.ptr))
def buffer(self, width, quadsegs=8):
"""
Returns a geometry that represents all points whose distance from this
Geometry is less than or equal to distance. Calculations are in the
Spatial Reference System of this Geometry. The optional third parameter sets
the number of segment used to approximate a quarter circle (defaults to 8).
(Text from PostGIS documentation at ch. 6.1.3)
"""
return self._topology(capi.geos_buffer(self.ptr, width, quadsegs))
@property
def centroid(self):
"""
The centroid is equal to the centroid of the set of component Geometries
of highest dimension (since the lower-dimension geometries contribute zero
"weight" to the centroid).
"""
return self._topology(capi.geos_centroid(self.ptr))
@property
def convex_hull(self):
"""
Returns the smallest convex Polygon that contains all the points
in the Geometry.
"""
return self._topology(capi.geos_convexhull(self.ptr))
def difference(self, other):
"""
Returns a Geometry representing the points making up this Geometry
that do not make up other.
"""
return self._topology(capi.geos_difference(self.ptr, other.ptr))
@property
def envelope(self):
"Return the envelope for this geometry (a polygon)."
return self._topology(capi.geos_envelope(self.ptr))
def intersection(self, other):
"Returns a Geometry representing the points shared by this Geometry and other."
return self._topology(capi.geos_intersection(self.ptr, other.ptr))
@property
def point_on_surface(self):
"Computes an interior point of this Geometry."
return self._topology(capi.geos_pointonsurface(self.ptr))
def relate(self, other):
"Returns the DE-9IM intersection matrix for this Geometry and the other."
return capi.geos_relate(self.ptr, other.ptr)
def simplify(self, tolerance=0.0, preserve_topology=False):
"""
Returns the Geometry, simplified using the Douglas-Peucker algorithm
to the specified tolerance (higher tolerance => less points). If no
tolerance provided, defaults to 0.
By default, this function does not preserve topology - e.g. polygons can
be split, collapse to lines or disappear holes can be created or
disappear, and lines can cross. By specifying preserve_topology=True,
the result will have the same dimension and number of components as the
input. This is significantly slower.
"""
if preserve_topology:
return self._topology(capi.geos_preservesimplify(self.ptr, tolerance))
else:
return self._topology(capi.geos_simplify(self.ptr, tolerance))
def sym_difference(self, other):
"""
Returns a set combining the points in this Geometry not in other,
and the points in other not in this Geometry.
"""
return self._topology(capi.geos_symdifference(self.ptr, other.ptr))
def union(self, other):
"Returns a Geometry representing all the points in this Geometry and other."
return self._topology(capi.geos_union(self.ptr, other.ptr))
#### Other Routines ####
@property
def area(self):
"Returns the area of the Geometry."
return capi.geos_area(self.ptr, byref(c_double()))
def distance(self, other):
"""
Returns the distance between the closest points on this Geometry
and the other. Units will be in those of the coordinate system of
the Geometry.
"""
if not isinstance(other, GEOSGeometry):
raise TypeError('distance() works only on other GEOS Geometries.')
return capi.geos_distance(self.ptr, other.ptr, byref(c_double()))
    @property
    def extent(self):
        """
        Returns the extent of this geometry as a 4-tuple, consisting of
        (xmin, ymin, xmax, ymax).
        """
        env = self.envelope
        if isinstance(env, Point):
            # Degenerate case: the envelope of a point is the point itself,
            # so min and max coordinates coincide.
            xmin, ymin = env.tuple
            xmax, ymax = xmin, ymin
        else:
            # env is a polygon; ring corner 0 is taken as (xmin, ymin) and
            # corner 2 as (xmax, ymax) -- assumes GEOS envelope corner
            # ordering; NOTE(review): confirm against the GEOS version in use.
            xmin, ymin = env[0][0]
            xmax, ymax = env[0][2]
        return (xmin, ymin, xmax, ymax)
@property
def length(self):
"""
Returns the length of this Geometry (e.g., 0 for point, or the
circumfrence of a Polygon).
"""
return capi.geos_length(self.ptr, byref(c_double()))
def clone(self):
"Clones this Geometry."
return GEOSGeometry(capi.geom_clone(self.ptr), srid=self.srid)
# Class mapping dictionary. Has to be at the end to avoid import
# conflicts with GEOSGeometry.
from django.contrib.gis.geos.linestring import LineString, LinearRing
from django.contrib.gis.geos.point import Point
from django.contrib.gis.geos.polygon import Polygon
from django.contrib.gis.geos.collections import GeometryCollection, MultiPoint, MultiLineString, MultiPolygon
# Maps the integer geometry type id reported by the GEOS C API to the
# Python wrapper class that should be instantiated for it.
GEOS_CLASSES = {0 : Point,
                1 : LineString,
                2 : LinearRing,
                3 : Polygon,
                4 : MultiPoint,
                5 : MultiLineString,
                6 : MultiPolygon,
                7 : GeometryCollection,
                }
# If supported, import the PreparedGeometry class.
if GEOS_PREPARE:
    from django.contrib.gis.geos.prepared import PreparedGeometry
| apache-2.0 |
yinquan529/platform-external-chromium_org | third_party/jinja2/tests.py | 638 | 3444 | # -*- coding: utf-8 -*-
"""
jinja2.tests
~~~~~~~~~~~~
Jinja test functions. Used with the "is" operator.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
from jinja2.runtime import Undefined
from jinja2._compat import text_type, string_types, mapping_types
# Matches optionally signed integers and simple decimals (no exponents).
number_re = re.compile(r'^-?\d+(\.\d+)?$')
# The compiled-pattern type, useful for isinstance() checks elsewhere.
regex_type = type(number_re)
# The builtin works directly as the 'callable' test implementation.
test_callable = callable
def test_odd(value):
"""Return true if the variable is odd."""
return value % 2 == 1
def test_even(value):
"""Return true if the variable is even."""
return value % 2 == 0
def test_divisibleby(value, num):
"""Check if a variable is divisible by a number."""
return value % num == 0
def test_defined(value):
    """Return true if the variable is defined:

    .. sourcecode:: jinja

        {% if variable is defined %}
            value of variable: {{ variable }}
        {% else %}
            variable is not defined
        {% endif %}

    See the :func:`default` filter for a simple way to set undefined
    variables.
    """
    # Anything that is not the runtime's Undefined sentinel counts as
    # defined.
    if isinstance(value, Undefined):
        return False
    return True
def test_undefined(value):
    """Like :func:`defined` but the other way round."""
    return not test_defined(value)
def test_none(value):
"""Return true if the variable is none."""
return value is None
def test_lower(value):
    """Check whether the string form of the variable is all lowercase."""
    text = text_type(value)
    return text.islower()
def test_upper(value):
    """Check whether the string form of the variable is all uppercase."""
    text = text_type(value)
    return text.isupper()
def test_string(value):
    """Check whether the object is a string."""
    is_str = isinstance(value, string_types)
    return is_str
def test_mapping(value):
    """Check whether the object is a mapping (dict etc.).

    .. versionadded:: 2.6
    """
    is_map = isinstance(value, mapping_types)
    return is_map
def test_number(value):
"""Return true if the variable is a number."""
return isinstance(value, (int, float, complex))
def test_sequence(value):
"""Return true if the variable is a sequence. Sequences are variables
that are iterable.
"""
try:
len(value)
value.__getitem__
except:
return False
return True
def test_sameas(value, other):
"""Check if an object points to the same memory address than another
object:
.. sourcecode:: jinja
{% if foo.attribute is sameas false %}
the foo attribute really is the `False` singleton
{% endif %}
"""
return value is other
def test_iterable(value):
"""Check if it's possible to iterate over an object."""
try:
iter(value)
except TypeError:
return False
return True
def test_escaped(value):
"""Check if the value is escaped."""
return hasattr(value, '__html__')
#: Registry mapping test names (the identifier used after the ``is``
#: operator in templates) to the callables implementing them.
TESTS = {
    'odd':              test_odd,
    'even':             test_even,
    'divisibleby':      test_divisibleby,
    'defined':          test_defined,
    'undefined':        test_undefined,
    'none':             test_none,
    'lower':            test_lower,
    'upper':            test_upper,
    'string':           test_string,
    'mapping':          test_mapping,
    'number':           test_number,
    'sequence':         test_sequence,
    'iterable':         test_iterable,
    'callable':         test_callable,
    'sameas':           test_sameas,
    'escaped':          test_escaped
}
| bsd-3-clause |
sidartaoliveira/ansible | test/runner/lib/delegation.py | 24 | 11372 | """Delegate test execution to another environment."""
from __future__ import absolute_import, print_function
import os
import re
import sys
import tempfile
import lib.pytar
import lib.thread
from lib.executor import (
SUPPORTED_PYTHON_VERSIONS,
IntegrationConfig,
ShellConfig,
SanityConfig,
UnitsConfig,
create_shell_command,
)
from lib.test import (
TestConfig,
)
from lib.core_ci import (
AnsibleCoreCI,
)
from lib.manage_ci import (
ManagePosixCI,
)
from lib.util import (
ApplicationError,
EnvironmentConfig,
run_command,
common_environment,
pass_vars,
)
from lib.docker_util import (
docker_exec,
docker_get,
docker_pull,
docker_put,
docker_rm,
docker_run,
)
from lib.cloud import (
get_cloud_providers,
)
def delegate(args, exclude, require):
    """Delegate the run, creating a metadata file for test configurations.

    :type args: EnvironmentConfig
    :type exclude: list[str]
    :type require: list[str]
    :rtype: bool
    """
    if not isinstance(args, TestConfig):
        return delegate_command(args, exclude, require)

    # Test runs carry metadata into the delegated environment; the file
    # lives in the current directory so it is included in the payload.
    with tempfile.NamedTemporaryFile(prefix='metadata-', suffix='.json', dir=os.getcwd()) as metadata_fd:
        args.metadata_path = os.path.basename(metadata_fd.name)
        args.metadata.to_file(args.metadata_path)

        try:
            return delegate_command(args, exclude, require)
        finally:
            args.metadata_path = None
def delegate_command(args, exclude, require):
    """Dispatch to the first configured delegation backend, if any.

    :type args: EnvironmentConfig
    :type exclude: list[str]
    :type require: list[str]
    :rtype: bool
    """
    if args.tox:
        handler = delegate_tox
    elif args.docker:
        handler = delegate_docker
    elif args.remote:
        handler = delegate_remote
    else:
        # No delegation requested; the caller runs the tests locally.
        return False

    handler(args, exclude, require)
    return True
def delegate_tox(args, exclude, require):
    """Re-run the current ansible-test invocation under tox, once per
    selected Python version.
    :type args: EnvironmentConfig
    :type exclude: list[str]
    :type require: list[str]
    """
    if args.python:
        # Trailing comma builds a 1-tuple so the loop below works for
        # both the single-version and all-versions cases.
        versions = args.python,
        if args.python not in SUPPORTED_PYTHON_VERSIONS:
            raise ApplicationError('tox does not support Python version %s' % args.python)
    else:
        versions = SUPPORTED_PYTHON_VERSIONS
    # Options stripped from argv before re-issuing the command (value
    # counts per option); see filter_options().
    options = {
        '--tox': args.tox_args,
        '--tox-sitepackages': 0,
    }
    for version in versions:
        # Map a version such as "2.7" to the tox environment name "py27".
        tox = ['tox', '-c', 'test/runner/tox.ini', '-e', 'py' + version.replace('.', '')]
        if args.tox_sitepackages:
            tox.append('--sitepackages')
        tox.append('--')
        cmd = generate_command(args, os.path.abspath('test/runner/test.py'), options, exclude, require)
        if not args.python:
            # Pin the inner invocation to this loop iteration's version.
            cmd += ['--python', version]
        if isinstance(args, TestConfig):
            if args.coverage and not args.coverage_label:
                cmd += ['--coverage-label', 'tox-%s' % version]
        env = common_environment()
        # temporary solution to permit ansible-test delegated to tox to provision remote resources
        optional = (
            'SHIPPABLE',
            'SHIPPABLE_BUILD_ID',
            'SHIPPABLE_JOB_NUMBER',
        )
        env.update(pass_vars(required=[], optional=optional))
        run_command(args, tox + cmd, env=env)
def delegate_docker(args, exclude, require):
    """Delegate the current ansible-test run into a docker container,
    optionally linked to a helper (http tester) container.
    :type args: EnvironmentConfig
    :type exclude: list[str]
    :type require: list[str]
    """
    util_image = args.docker_util
    test_image = args.docker
    privileged = args.docker_privileged
    if util_image:
        docker_pull(args, util_image)
    docker_pull(args, test_image)
    util_id = None
    test_id = None
    # Options stripped from argv before re-issuing the command inside the
    # container (value counts per option); see filter_options().
    options = {
        '--docker': 1,
        '--docker-privileged': 0,
        '--docker-util': 1,
    }
    cmd = generate_command(args, '/root/ansible/test/runner/test.py', options, exclude, require)
    if isinstance(args, TestConfig):
        if args.coverage and not args.coverage_label:
            # Derive a filesystem-safe coverage label from the image name.
            image_label = re.sub('^ansible/ansible:', '', args.docker)
            image_label = re.sub('[^a-zA-Z0-9]+', '-', image_label)
            cmd += ['--coverage-label', 'docker-%s' % image_label]
    if isinstance(args, IntegrationConfig):
        if not args.allow_destructive:
            # NOTE(review): appears intentional — the container is
            # disposable, so the flag is added when the user did not
            # already pass it (avoiding a duplicate). Confirm intent.
            cmd.append('--allow-destructive')
    cmd_options = []
    if isinstance(args, ShellConfig):
        # Interactive shell needs a TTY attached to docker exec.
        cmd_options.append('-it')
    with tempfile.NamedTemporaryFile(prefix='ansible-source-', suffix='.tgz') as local_source_fd:
        try:
            if not args.explain:
                lib.pytar.create_tarfile(local_source_fd.name, '.', lib.pytar.ignore)
            if util_image:
                util_options = [
                    '--detach',
                ]
                util_id, _ = docker_run(args, util_image, options=util_options)
                if args.explain:
                    util_id = 'util_id'
                else:
                    util_id = util_id.strip()
            else:
                util_id = None
            test_options = [
                '--detach',
                '--volume', '/sys/fs/cgroup:/sys/fs/cgroup:ro',
                '--privileged=%s' % str(privileged).lower(),
            ]
            if util_id:
                # Expose the helper container under the hostnames the
                # http test targets expect.
                test_options += [
                    '--link', '%s:ansible.http.tests' % util_id,
                    '--link', '%s:sni1.ansible.http.tests' % util_id,
                    '--link', '%s:sni2.ansible.http.tests' % util_id,
                    '--link', '%s:fail.ansible.http.tests' % util_id,
                    '--env', 'HTTPTESTER=1',
                ]
            if isinstance(args, TestConfig):
                cloud_platforms = get_cloud_providers(args)
                for cloud_platform in cloud_platforms:
                    test_options += cloud_platform.get_docker_run_options()
            test_id, _ = docker_run(args, test_image, options=test_options)
            if args.explain:
                test_id = 'test_id'
            else:
                test_id = test_id.strip()
            # write temporary files to /root since /tmp isn't ready immediately on container start
            docker_put(args, test_id, 'test/runner/setup/docker.sh', '/root/docker.sh')
            docker_exec(args, test_id, ['/bin/bash', '/root/docker.sh'])
            docker_put(args, test_id, local_source_fd.name, '/root/ansible.tgz')
            docker_exec(args, test_id, ['mkdir', '/root/ansible'])
            docker_exec(args, test_id, ['tar', 'oxzf', '/root/ansible.tgz', '-C', '/root/ansible'])
            # docker images are only expected to have a single python version available
            if isinstance(args, UnitsConfig) and not args.python:
                cmd += ['--python', 'default']
            try:
                docker_exec(args, test_id, cmd, options=cmd_options)
            finally:
                # Always retrieve test results, even when the run failed.
                with tempfile.NamedTemporaryFile(prefix='ansible-result-', suffix='.tgz') as local_result_fd:
                    docker_exec(args, test_id, ['tar', 'czf', '/root/results.tgz', '-C', '/root/ansible/test', 'results'])
                    docker_get(args, test_id, '/root/results.tgz', local_result_fd.name)
                    run_command(args, ['tar', 'oxzf', local_result_fd.name, '-C', 'test'])
        finally:
            # Tear down whichever containers were actually started.
            if util_id:
                docker_rm(args, util_id)
            if test_id:
                docker_rm(args, test_id)
def delegate_remote(args, exclude, require):
    """Delegate the current ansible-test run to a provisioned remote
    instance, downloading results before optional teardown.
    :type args: EnvironmentConfig
    :type exclude: list[str]
    :type require: list[str]
    """
    # assumes --remote is of the form 'platform/version'; an argument
    # without '/' raises IndexError here — TODO confirm upstream validation.
    parts = args.remote.split('/', 1)
    platform = parts[0]
    version = parts[1]
    core_ci = AnsibleCoreCI(args, platform, version, stage=args.remote_stage)
    # Tracks whether the delegated run succeeded, for conditional teardown.
    success = False
    try:
        core_ci.start()
        core_ci.wait()
        options = {
            '--remote': 1,
        }
        cmd = generate_command(args, 'ansible/test/runner/test.py', options, exclude, require)
        if isinstance(args, TestConfig):
            if args.coverage and not args.coverage_label:
                cmd += ['--coverage-label', 'remote-%s-%s' % (platform, version)]
        if isinstance(args, IntegrationConfig):
            if not args.allow_destructive:
                # NOTE(review): appears intentional — the instance is
                # disposable, so the flag is added when not already
                # present. Confirm intent.
                cmd.append('--allow-destructive')
        # remote instances are only expected to have a single python version available
        if isinstance(args, UnitsConfig) and not args.python:
            cmd += ['--python', 'default']
        manage = ManagePosixCI(core_ci)
        manage.setup()
        ssh_options = []
        if isinstance(args, TestConfig):
            cloud_platforms = get_cloud_providers(args)
            for cloud_platform in cloud_platforms:
                ssh_options += cloud_platform.get_remote_ssh_options()
        try:
            manage.ssh(cmd, ssh_options)
            success = True
        finally:
            # Always copy results back, even on failure.
            manage.ssh('rm -rf /tmp/results && cp -a ansible/test/results /tmp/results')
            manage.download('/tmp/results', 'test')
    finally:
        # 'always' tears down unconditionally; 'success' keeps failed
        # instances alive for debugging.
        if args.remote_terminate == 'always' or (args.remote_terminate == 'success' and success):
            core_ci.stop()
def generate_command(args, path, options, exclude, require):
    """Build the delegated ansible-test command line from current argv.

    :type args: EnvironmentConfig
    :type path: str
    :type options: dict[str, int]
    :type exclude: list[str]
    :type require: list[str]
    :rtype: list[str]
    """
    # --color is always stripped and re-issued explicitly below.
    options['--color'] = 1

    cmd = [path]
    cmd.extend(filter_options(args, sys.argv[1:], options, exclude, require))
    cmd.extend(['--color', 'yes' if args.color else 'no'])

    if args.requirements:
        cmd.append('--requirements')

    if isinstance(args, ShellConfig):
        return create_shell_command(cmd)

    if isinstance(args, SanityConfig) and args.base_branch:
        cmd.extend(['--base-branch', args.base_branch])

    return cmd
def filter_options(args, argv, options, exclude, require):
    """Yield 'argv' with the options named in 'options' (mapped to their
    number of value arguments) removed, then append the delegation-specific
    --exclude/--require/--metadata options.
    :type args: EnvironmentConfig
    :type argv: list[str]
    :type options: dict[str, int]
    :type exclude: list[str]
    :type require: list[str]
    :rtype: collections.Iterable[str]
    """
    options = options.copy()
    options['--requirements'] = 0
    if isinstance(args, TestConfig):
        options.update({
            '--changed': 0,
            '--tracked': 0,
            '--untracked': 0,
            '--ignore-committed': 0,
            '--ignore-staged': 0,
            '--ignore-unstaged': 0,
            '--changed-from': 1,
            '--changed-path': 1,
            '--metadata': 1,
        })
    elif isinstance(args, SanityConfig):
        options.update({
            '--base-branch': 1,
        })
    # Number of value arguments still to drop for the most recently
    # filtered option.
    remaining = 0
    for arg in argv:
        if not arg.startswith('-') and remaining:
            remaining -= 1
            continue
        remaining = 0
        # Handle both '--opt value' and '--opt=value'; the split length
        # feeds into the adjustment below so an inline value counts as
        # already consumed.
        parts = arg.split('=', 1)
        key = parts[0]
        if key in options:
            remaining = options[key] - len(parts) + 1
            continue
        yield arg
    for target in exclude:
        yield '--exclude'
        yield target
    for target in require:
        yield '--require'
        yield target
    if isinstance(args, TestConfig):
        if args.metadata_path:
            yield '--metadata'
            yield args.metadata_path
| gpl-3.0 |
thomastu/django-wiki | wiki/plugins/attachments/settings.py | 16 | 3502 | from __future__ import absolute_import
from __future__ import unicode_literals
from django import VERSION
from django.conf import settings as django_settings
from wiki.conf import settings as wiki_settings
from django.core.exceptions import ImproperlyConfigured
# Settings for the wiki attachments plugin. Each value can be overridden
# from the Django settings module via the WIKI_ATTACHMENTS_* names below.
# This is not used in django 1.7+
APP_LABEL = 'attachments' if VERSION < (1, 7) else None
SLUG = "attachments"
# Please see this note about support for UTF-8 files on django/apache:
# https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/modwsgi/#if-you-get-a-unicodeencodeerror
# Allow anonymous users upload access (not nice on an open network)
# WIKI_ATTACHMENTS_ANONYMOUS can override this, otherwise the default
# in wiki.conf.settings is used.
ANONYMOUS = getattr(
    django_settings,
    'WIKI_ATTACHMENTS_ANONYMOUS',
    wiki_settings.ANONYMOUS_UPLOAD)
# Maximum file sizes: Please use something like LimitRequestBody on
# your web server.
# http://httpd.apache.org/docs/2.2/mod/core.html#LimitRequestBody
# Where to store article attachments, relative to MEDIA_ROOT
# You should NEVER enable directory indexing in MEDIA_ROOT/UPLOAD_PATH !
# Actually, you can completely disable serving it, if you want. Files are
# sent to the user through a Django view that reads and streams a file.
UPLOAD_PATH = getattr(
    django_settings,
    'WIKI_ATTACHMENTS_PATH',
    'wiki/attachments/%aid/')
# Should the upload path be obscurified? If so, a random hash will be added
# to the path such that someone can not guess the location of files (useful
# if you have restricted permissions but files still live within the web
# server's document root).
UPLOAD_PATH_OBSCURIFY = getattr(
    django_settings,
    'WIKI_ATTACHMENTS_PATH_OBSCURIFY',
    True)
# Allowed extensions. Empty to disallow uploads completely.
# No files are saved without appending ".upload" to the file to ensure that
# your web server never actually executes some script.
# Case insensitive.
# You are asked to explicitly enter all file extensions that you want
# to allow. For your own safety.
FILE_EXTENSIONS = getattr(
    django_settings, 'WIKI_ATTACHMENTS_EXTENSIONS',
    ['pdf', 'doc', 'odt', 'docx', 'txt'])
# Storage backend to use, default is to use the same as the rest of the
# wiki, which is set in WIKI_STORAGE_BACKEND, but you can override it
# with WIKI_ATTACHMENTS_STORAGE_BACKEND
STORAGE_BACKEND = getattr(
    django_settings,
    'WIKI_ATTACHMENTS_STORAGE_BACKEND',
    wiki_settings.STORAGE_BACKEND)
# SAFETY FIRST! Only store files with an appended .upload extension to be sure
# that something nasty does not get executed on the server.
APPEND_EXTENSION = getattr(
    django_settings,
    'WIKI_ATTACHMENTS_APPEND_EXTENSION',
    True)
# Important for S3 backends etc.: If your storage backend does not have a .path
# attribute for the file, but only a .url attribute, you should use False.
# This will reveal the direct download URL so it does not work perfectly for
# files you wish to be kept private.
USE_LOCAL_PATH = getattr(django_settings, 'WIKI_ATTACHMENTS_LOCAL_PATH', True)
# These two options are mutually exclusive; fail fast at import time.
if (not USE_LOCAL_PATH) and APPEND_EXTENSION:
    raise ImproperlyConfigured(
        "django-wiki (attachment plugin) not USE_LOCAL_PATH and APPEND_EXTENSION: "
        "You have configured to append .upload and not use local paths. That won't "
        "work as all your attachments will be stored and sent with a .upload "
        "extension. You have to trust your storage backend to be safe for storing"
        "the extensions you have allowed.")
hsu1994/Terminator | Server/RelyON/boost_1_61_0/tools/build/src/build/project.py | 11 | 50988 | # Status: ported.
# Base revision: 64488
# Copyright 2002, 2003 Dave Abrahams
# Copyright 2002, 2005, 2006 Rene Rivera
# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Implements project representation and loading. Each project is represented
# by:
# - a module where all the Jamfile content live.
# - an instance of 'project-attributes' class.
# (given a module name, can be obtained using the 'attributes' rule)
# - an instance of 'project-target' class (from targets.jam)
# (given a module name, can be obtained using the 'target' rule)
#
# Typically, projects are created as result of loading a Jamfile, which is done
# by rules 'load' and 'initialize', below. First, module for Jamfile is loaded
# and new project-attributes instance is created. Some rules necessary for
# project are added to the module (see 'project-rules' module) at the bottom of
# this file. Default project attributes are set (inheriting attributes of
# parent project, if it exists). After that the Jamfile is read. It can declare
# its own attributes using the 'project' rule which will be combined with any
# already set attributes.
#
# The 'project' rule can also declare a project id which will be associated
# with the project module.
#
# There can also be 'standalone' projects. They are created by calling
# 'initialize' on an arbitrary module and not specifying their location. After
# the call, the module can call the 'project' rule, declare main targets and
# behave as a regular project except that, since it is not associated with any
# location, it should only declare prebuilt targets.
#
# The list of all loaded Jamfiles is stored in the .project-locations variable.
# It is possible to obtain a module name for a location using the 'module-name'
# rule. Standalone projects are not recorded and can only be references using
# their project id.
import b2.util.path
import b2.build.targets
from b2.build import property_set, property
from b2.build.errors import ExceptionWithUserContext
from b2.manager import get_manager
import bjam
import b2
import re
import sys
import pkgutil
import os
import string
import imp
import traceback
import b2.util.option as option
from b2.util import (
record_jam_to_value_mapping, qualify_jam_action, is_iterable_typed, bjam_signature,
is_iterable)
class ProjectRegistry:
    def __init__(self, manager, global_build_dir):
        """Create the registry.
        manager -- the global build Manager instance.
        global_build_dir -- value of the --build-dir option, or None.
        """
        self.manager = manager
        self.global_build_dir = global_build_dir
        self.project_rules_ = ProjectRules(self)
        # The target corresponding to the project being loaded now
        self.current_project = None
        # The set of names of loaded project modules
        self.jamfile_modules = {}
        # Mapping from location to module name
        self.location2module = {}
        # Mapping from project id to project module
        self.id2module = {}
        # Map from Jamfile directory to parent Jamfile/Jamroot
        # location.
        self.dir2parent_jamfile = {}
        # Map from directory to the name of Jamfile in
        # that directory (or None).
        self.dir2jamfile = {}
        # Map from project module to attributes object.
        self.module2attributes = {}
        # Map from project module to target for the project
        self.module2target = {}
        # Map from names to Python modules, for modules loaded
        # via 'using' and 'import' rules in Jamfiles.
        self.loaded_tool_modules_ = {}
        self.loaded_tool_module_path_ = {}
        # Map from project target to the list of
        # (id,location) pairs corresponding to all 'use-project'
        # invocations.
        # TODO: should not have a global map, keep this
        # in ProjectTarget.
        self.used_projects = {}
        # Stack used by push_current/pop_current.
        self.saved_current_project = []
        self.JAMROOT = self.manager.getenv("JAMROOT");
        # Note the use of character groups, as opposed to listing
        # 'Jamroot' and 'jamroot'. With the latter, we'd get duplicate
        # matches on windows and would have to eliminate duplicates.
        if not self.JAMROOT:
            self.JAMROOT = ["project-root.jam", "[Jj]amroot", "[Jj]amroot.jam"]
        # Default patterns to search for the Jamfiles to use for build
        # declarations.
        self.JAMFILE = self.manager.getenv("JAMFILE")
        if not self.JAMFILE:
            self.JAMFILE = ["[Bb]uild.jam", "[Jj]amfile.v2", "[Jj]amfile",
                            "[Jj]amfile.jam"]
        self.__python_module_cache = {}
def load (self, jamfile_location):
"""Loads jamfile at the given location. After loading, project global
file and jamfile needed by the loaded one will be loaded recursively.
If the jamfile at that location is loaded already, does nothing.
Returns the project module for the Jamfile."""
assert isinstance(jamfile_location, basestring)
absolute = os.path.join(os.getcwd(), jamfile_location)
absolute = os.path.normpath(absolute)
jamfile_location = b2.util.path.relpath(os.getcwd(), absolute)
mname = self.module_name(jamfile_location)
# If Jamfile is already loaded, do not try again.
if not mname in self.jamfile_modules:
if "--debug-loading" in self.manager.argv():
print "Loading Jamfile at '%s'" % jamfile_location
self.load_jamfile(jamfile_location, mname)
# We want to make sure that child project are loaded only
# after parent projects. In particular, because parent projects
# define attributes which are inherited by children, and we do not
# want children to be loaded before parents has defined everything.
#
# While "build-project" and "use-project" can potentially refer
# to child projects from parent projects, we do not immediately
# load child projects when seing those attributes. Instead,
# we record the minimal information that will be used only later.
self.load_used_projects(mname)
return mname
def load_used_projects(self, module_name):
assert isinstance(module_name, basestring)
# local used = [ modules.peek $(module-name) : .used-projects ] ;
used = self.used_projects[module_name]
location = self.attribute(module_name, "location")
for u in used:
id = u[0]
where = u[1]
self.use(id, os.path.join(location, where))
def load_parent(self, location):
"""Loads parent of Jamfile at 'location'.
Issues an error if nothing is found."""
assert isinstance(location, basestring)
found = b2.util.path.glob_in_parents(
location, self.JAMROOT + self.JAMFILE)
if not found:
print "error: Could not find parent for project at '%s'" % location
print "error: Did not find Jamfile or project-root.jam in any parent directory."
sys.exit(1)
return self.load(os.path.dirname(found[0]))
def find(self, name, current_location):
"""Given 'name' which can be project-id or plain directory name,
return project module corresponding to that id or directory.
Returns nothing of project is not found."""
assert isinstance(name, basestring)
assert isinstance(current_location, basestring)
project_module = None
# Try interpreting name as project id.
if name[0] == '/':
project_module = self.id2module.get(name)
if not project_module:
location = os.path.join(current_location, name)
# If no project is registered for the given location, try to
# load it. First see if we have Jamfile. If not we might have project
# root, willing to act as Jamfile. In that case, project-root
# must be placed in the directory referred by id.
project_module = self.module_name(location)
if not project_module in self.jamfile_modules:
if b2.util.path.glob([location], self.JAMROOT + self.JAMFILE):
project_module = self.load(location)
else:
project_module = None
return project_module
def module_name(self, jamfile_location):
"""Returns the name of module corresponding to 'jamfile-location'.
If no module corresponds to location yet, associates default
module name with that location."""
assert isinstance(jamfile_location, basestring)
module = self.location2module.get(jamfile_location)
if not module:
# Root the path, so that locations are always umbiguious.
# Without this, we can't decide if '../../exe/program1' and '.'
# are the same paths, or not.
jamfile_location = os.path.realpath(
os.path.join(os.getcwd(), jamfile_location))
module = "Jamfile<%s>" % jamfile_location
self.location2module[jamfile_location] = module
return module
    def find_jamfile (self, dir, parent_root=0, no_errors=0):
        """Find the Jamfile at the given location. This returns the
        exact names of all the Jamfiles in the given directory. The optional
        parent-root argument causes this to search not the given directory
        but the ones above it up to the directory given in it."""
        assert isinstance(dir, basestring)
        assert isinstance(parent_root, (int, bool))
        assert isinstance(no_errors, (int, bool))
        # Glob for all the possible Jamfiles according to the match pattern.
        # Results are memoized per directory in dir2parent_jamfile /
        # dir2jamfile.
        jamfile_glob = None
        if parent_root:
            parent = self.dir2parent_jamfile.get(dir)
            if not parent:
                parent = b2.util.path.glob_in_parents(dir,
                                                      self.JAMFILE)
                self.dir2parent_jamfile[dir] = parent
            jamfile_glob = parent
        else:
            jamfile = self.dir2jamfile.get(dir)
            if not jamfile:
                jamfile = b2.util.path.glob([dir], self.JAMFILE)
                self.dir2jamfile[dir] = jamfile
            jamfile_glob = jamfile
        if len(jamfile_glob) > 1:
            # Multiple Jamfiles found in the same place. Warn about this.
            # And ensure we use only one of them.
            # As a temporary convenience measure, if there's Jamfile.v2 amongst
            # the found files, suppress the warning and use it.
            pattern = "(.*[Jj]amfile\\.v2)|(.*[Bb]uild\\.jam)"
            v2_jamfiles = [x for x in jamfile_glob if re.match(pattern, x)]
            if len(v2_jamfiles) == 1:
                jamfile_glob = v2_jamfiles
            else:
                print """warning: Found multiple Jamfiles at '%s'!""" % (dir)
                for j in jamfile_glob:
                    print "    -", j
                print "Loading the first one"
        # Could not find it, error.
        if not no_errors and not jamfile_glob:
            self.manager.errors()(
                """Unable to load Jamfile.
Could not find a Jamfile in directory '%s'
Attempted to find it with pattern '%s'.
Please consult the documentation at 'http://boost.org/boost-build2'."""
                % (dir, string.join(self.JAMFILE)))
        if jamfile_glob:
            # Return only the first match (callers expect a single name).
            return jamfile_glob[0]
def load_jamfile(self, dir, jamfile_module):
"""Load a Jamfile at the given directory. Returns nothing.
Will attempt to load the file as indicated by the JAMFILE patterns.
Effect of calling this rule twice with the same 'dir' is underfined."""
assert isinstance(dir, basestring)
assert isinstance(jamfile_module, basestring)
# See if the Jamfile is where it should be.
is_jamroot = False
jamfile_to_load = b2.util.path.glob([dir], self.JAMROOT)
if not jamfile_to_load:
jamfile_to_load = self.find_jamfile(dir)
else:
if len(jamfile_to_load) > 1:
get_manager().errors()("Multiple Jamfiles found at '%s'\n" +\
"Filenames are: %s"
% (dir, [os.path.basename(j) for j in jamfile_to_load]))
is_jamroot = True
jamfile_to_load = jamfile_to_load[0]
dir = os.path.dirname(jamfile_to_load)
if not dir:
dir = "."
self.used_projects[jamfile_module] = []
# Now load the Jamfile in it's own context.
# The call to 'initialize' may load parent Jamfile, which might have
# 'use-project' statement that causes a second attempt to load the
# same project we're loading now. Checking inside .jamfile-modules
# prevents that second attempt from messing up.
if not jamfile_module in self.jamfile_modules:
self.jamfile_modules[jamfile_module] = True
# Initialize the jamfile module before loading.
#
self.initialize(jamfile_module, dir, os.path.basename(jamfile_to_load))
saved_project = self.current_project
bjam.call("load", jamfile_module, jamfile_to_load)
basename = os.path.basename(jamfile_to_load)
if is_jamroot:
jamfile = self.find_jamfile(dir, no_errors=True)
if jamfile:
bjam.call("load", jamfile_module, jamfile)
# Now do some checks
if self.current_project != saved_project:
self.manager.errors()(
"""The value of the .current-project variable
has magically changed after loading a Jamfile.
This means some of the targets might be defined a the wrong project.
after loading %s
expected value %s
actual value %s""" % (jamfile_module, saved_project, self.current_project))
if self.global_build_dir:
id = self.attributeDefault(jamfile_module, "id", None)
project_root = self.attribute(jamfile_module, "project-root")
location = self.attribute(jamfile_module, "location")
if location and project_root == dir:
# This is Jamroot
if not id:
# FIXME: go via errors module, so that contexts are
# shown?
print "warning: the --build-dir option was specified"
print "warning: but Jamroot at '%s'" % dir
print "warning: specified no project id"
print "warning: the --build-dir option will be ignored"
def load_standalone(self, jamfile_module, file):
"""Loads 'file' as standalone project that has no location
associated with it. This is mostly useful for user-config.jam,
which should be able to define targets, but although it has
some location in filesystem, we do not want any build to
happen in user's HOME, for example.
The caller is required to never call this method twice on
the same file.
"""
assert isinstance(jamfile_module, basestring)
assert isinstance(file, basestring)
self.used_projects[jamfile_module] = []
bjam.call("load", jamfile_module, file)
self.load_used_projects(jamfile_module)
def is_jamroot(self, basename):
assert isinstance(basename, basestring)
match = [ pat for pat in self.JAMROOT if re.match(pat, basename)]
if match:
return 1
else:
return 0
def initialize(self, module_name, location=None, basename=None):
"""Initialize the module for a project.
module-name is the name of the project module.
location is the location (directory) of the project to initialize.
If not specified, standalone project will be initialized
"""
assert isinstance(module_name, basestring)
assert isinstance(location, basestring) or location is None
assert isinstance(basename, basestring) or basename is None
if "--debug-loading" in self.manager.argv():
print "Initializing project '%s'" % module_name
# TODO: need to consider if standalone projects can do anything but defining
# prebuilt targets. If so, we need to give more sensible "location", so that
# source paths are correct.
if not location:
location = ""
attributes = ProjectAttributes(self.manager, location, module_name)
self.module2attributes[module_name] = attributes
python_standalone = False
if location:
attributes.set("source-location", [location], exact=1)
elif not module_name in ["test-config", "site-config", "user-config", "project-config"]:
# This is a standalone project with known location. Set source location
# so that it can declare targets. This is intended so that you can put
# a .jam file in your sources and use it via 'using'. Standard modules
# (in 'tools' subdir) may not assume source dir is set.
attributes.set("source-location", self.loaded_tool_module_path_[module_name], exact=1)
python_standalone = True
attributes.set("requirements", property_set.empty(), exact=True)
attributes.set("usage-requirements", property_set.empty(), exact=True)
attributes.set("default-build", property_set.empty(), exact=True)
attributes.set("projects-to-build", [], exact=True)
attributes.set("project-root", None, exact=True)
attributes.set("build-dir", None, exact=True)
self.project_rules_.init_project(module_name, python_standalone)
jamroot = False
parent_module = None;
if module_name == "test-config":
# No parent
pass
elif module_name == "site-config":
parent_module = "test-config"
elif module_name == "user-config":
parent_module = "site-config"
elif module_name == "project-config":
parent_module = "user-config"
elif location and not self.is_jamroot(basename):
# We search for parent/project-root only if jamfile was specified
# --- i.e
# if the project is not standalone.
parent_module = self.load_parent(location)
else:
# It's either jamroot, or standalone project.
# If it's jamroot, inherit from user-config.
if location:
# If project-config module exist, inherit from it.
if self.module2attributes.has_key("project-config"):
parent_module = "project-config"
else:
parent_module = "user-config" ;
jamroot = True ;
if parent_module:
self.inherit_attributes(module_name, parent_module)
attributes.set("parent-module", parent_module, exact=1)
if jamroot:
attributes.set("project-root", location, exact=1)
parent = None
if parent_module:
parent = self.target(parent_module)
if not self.module2target.has_key(module_name):
target = b2.build.targets.ProjectTarget(self.manager,
module_name, module_name, parent,
self.attribute(module_name, "requirements"),
# FIXME: why we need to pass this? It's not
# passed in jam code.
self.attribute(module_name, "default-build"))
self.module2target[module_name] = target
self.current_project = self.target(module_name)
    def inherit_attributes(self, project_module, parent_module):
        """Make 'project-module' inherit attributes of project
        root and parent module."""
        assert isinstance(project_module, basestring)
        assert isinstance(parent_module, basestring)
        attributes = self.module2attributes[project_module]
        pattributes = self.module2attributes[parent_module]
        # Parent module might be locationless user-config.
        # FIXME:
        #if [ modules.binding $(parent-module) ]
        #{
        #    $(attributes).set parent : [ path.parent
        #                                 [ path.make [ modules.binding $(parent-module) ] ] ] ;
        #    }
        # These four attributes propagate verbatim from parent to child.
        attributes.set("project-root", pattributes.get("project-root"), exact=True)
        attributes.set("default-build", pattributes.get("default-build"), exact=True)
        attributes.set("requirements", pattributes.get("requirements"), exact=True)
        attributes.set("usage-requirements",
                       pattributes.get("usage-requirements"), exact=1)
        parent_build_dir = pattributes.get("build-dir")
        if parent_build_dir:
            # Have to compute relative path from parent dir to our dir
            # Convert both paths to absolute, since we cannot
            # find relative path from ".." to "."
            location = attributes.get("location")
            parent_location = pattributes.get("location")
            our_dir = os.path.join(os.getcwd(), location)
            parent_dir = os.path.join(os.getcwd(), parent_location)
            # The child's build dir mirrors its position under the parent.
            build_dir = os.path.join(parent_build_dir,
                                     os.path.relpath(our_dir, parent_dir))
            attributes.set("build-dir", build_dir, exact=True)
def register_id(self, id, module):
    """Record that the project module *module* is reachable under *id*."""
    assert isinstance(module, basestring)
    assert isinstance(id, basestring)
    self.id2module[id] = module
def current(self):
    """Returns the project which is currently being loaded.

    Counterpart of set_current()/push_current() below."""
    return self.current_project
def set_current(self, c):
    """Unconditionally replace the current project with ProjectTarget 'c'."""
    if __debug__:
        # Local import avoids a circular import at module load time;
        # the type check only runs in debug builds.
        from .targets import ProjectTarget
        assert isinstance(c, ProjectTarget)
    self.current_project = c
def push_current(self, project):
    """Temporarily changes the current project to 'project'. Should
    be followed by 'pop_current'."""
    if __debug__:
        # Local import avoids a circular import at module load time.
        from .targets import ProjectTarget
        assert isinstance(project, ProjectTarget)
    self.saved_current_project.append(self.current_project)
    self.current_project = project
def pop_current(self):
    """Restore the project that was active before the matching
    push_current() call."""
    self.current_project = self.saved_current_project.pop()
def attributes(self, project):
    """Returns the ProjectAttributes instance for the
    specified jamfile module; raises KeyError if unknown."""
    assert isinstance(project, basestring)
    return self.module2attributes[project]
def attribute(self, project, attribute):
    """Returns the value of the specified attribute in the
    specified jamfile module.

    Raises BaseException when the project has no attributes recorded.
    """
    assert isinstance(project, basestring)
    assert isinstance(attribute, basestring)
    try:
        return self.module2attributes[project].get(attribute)
    except KeyError:
        # The original format string had a single '%s' for two
        # arguments, so this line itself raised TypeError ("not all
        # arguments converted") instead of the intended error.
        raise BaseException("No attribute '%s' for project %s"
                            % (attribute, project))
def attributeDefault(self, project, attribute, default):
    """Returns the value of the specified attribute in the
    specified jamfile module, or 'default' when the attribute is unset."""
    assert isinstance(project, basestring)
    assert isinstance(attribute, basestring)
    assert isinstance(default, basestring) or default is None
    return self.module2attributes[project].getDefault(attribute, default)
def target(self, project_module):
    """Returns the project target corresponding to the 'project-module',
    creating it on first use."""
    assert isinstance(project_module, basestring)
    # 'not in' replaces the Python-2-only dict.has_key().
    if project_module not in self.module2target:
        # NOTE(review): this constructor call passes
        # (name, name, requirements), while the ProjectTarget built
        # elsewhere in this registry also receives the manager and a
        # parent -- confirm which signature ProjectTarget expects.
        self.module2target[project_module] = \
            b2.build.targets.ProjectTarget(project_module, project_module,
                                           self.attribute(project_module, "requirements"))
    return self.module2target[project_module]
def use(self, id, location):
    """Use/load the project at 'location' and make it reachable via 'id'."""
    assert isinstance(id, basestring)
    assert isinstance(location, basestring)
    saved_project = self.current_project
    project_module = self.load(location)
    declared_id = self.attributeDefault(project_module, "id", "")

    if not declared_id or declared_id != id:
        # The project at 'location' either have no id or
        # that id is not equal to the 'id' parameter.
        if self.id2module.has_key(id) and self.id2module[id] != project_module:
            self.manager.errors()(
                """Attempt to redeclare already existing project id '%s' at location '%s'""" % (id, location))
        self.id2module[id] = project_module

    # NOTE(review): the value saved above came from 'current_project',
    # but it is restored into 'current_module' -- it looks like the
    # current project is never actually restored here. Confirm against
    # the callers before changing.
    self.current_module = saved_project
def add_rule(self, name, callable_):
    """Makes rule 'name' available to all subsequently loaded Jamfiles.

    Calling that rule will relay to 'callable_'."""
    assert isinstance(name, basestring)
    assert callable(callable_)
    self.project_rules_.add_rule(name, callable_)
def project_rules(self):
    # Accessor for the ProjectRules instance holding all Jamfile rules.
    return self.project_rules_
def glob_internal(self, project, wildcards, excludes, rule_name):
    """Expand 'wildcards' (minus 'excludes') relative to 'project's
    source location using the b2.util.path function named by
    'rule_name' ("glob" or "glob_tree"), returning names suitable for
    a sources list."""
    if __debug__:
        from .targets import ProjectTarget
        assert isinstance(project, ProjectTarget)
    assert is_iterable_typed(wildcards, basestring)
    assert is_iterable_typed(excludes, basestring) or excludes is None
    assert isinstance(rule_name, basestring)
    location = project.get("source-location")[0]

    result = []
    callable = b2.util.path.__dict__[rule_name]
    paths = callable([location], wildcards, excludes)

    # True when any wildcard carries a directory component
    # (replaces the manual flag-and-break loop).
    has_dir = any(os.path.dirname(w) for w in wildcards)

    if has_dir or rule_name != "glob":
        result = []
        # The paths we've found are relative to the current directory,
        # but the names specified in a sources list are assumed to
        # be relative to the source directory of the corresponding
        # project. Either translate them or make them absolute.
        for p in paths:
            rel = os.path.relpath(p, location)
            # If the path is below the source location, use the relative path.
            if not ".." in rel:
                result.append(rel)
            else:
                # Otherwise, use the full path just to avoid any ambiguities.
                result.append(os.path.abspath(p))
    else:
        # There were no directories in the wildcards, so the files are all
        # in the source directory of the project. Just drop the
        # directory, instead of making paths absolute.
        result = [os.path.basename(p) for p in paths]

    return result
def __build_python_module_cache(self):
    """Recursively walks through the b2/src subdirectories and
    creates an index of base module name to package name. The
    index is stored within self.__python_module_cache and allows
    for an O(1) module lookup.

    For example, given the base module name `toolset`,
    self.__python_module_cache['toolset'] will return
    'b2.build.toolset'

    pkgutil.walk_packages() will find any python package
    provided a directory contains an __init__.py. This has the
    added benefit of allowing libraries to be installed and
    automatically available within the contrib directory.

    *Note*: pkgutil.walk_packages() will import any subpackage
    in order to access its __path__ variable. Meaning:
    any initialization code will be run if the package hasn't
    already been imported.
    """
    cache = {}

    for importer, mname, ispkg in pkgutil.walk_packages(b2.__path__, prefix='b2.'):
        basename = mname.split('.')[-1]
        # Since the jam code is only going to have "import toolset ;"
        # it doesn't matter if there are separately named "b2.build.toolset" and
        # "b2.contrib.toolset" as it is impossible to know which the user is
        # referring to.
        if basename in cache:
            self.manager.errors()('duplicate module name "{0}" '
                                  'found in boost-build path'.format(basename))
        cache[basename] = mname
    self.__python_module_cache = cache
def load_module(self, name, extra_path=None):
    """Load a Python module that should be useable from Jamfiles.

    There are generally two types of modules Jamfiles might want to use:
    - Core Boost.Build. Those are imported using plain names, e.g.
      'toolset', so this function checks if we have a module named
      b2.package.module already.
    - Python modules in the same directory as the Jamfile. We don't
      want to even temporarily add the Jamfile's directory to sys.path,
      since then we might get naming conflicts between standard
      Python modules and those.
    """
    assert isinstance(name, basestring)
    assert is_iterable_typed(extra_path, basestring) or extra_path is None
    # See if we loaded a module of this name already.
    existing = self.loaded_tool_modules_.get(name)
    if existing:
        return existing

    # Check the extra path as well as any paths outside
    # of the b2 package and import the module if it exists.
    b2_path = os.path.normpath(b2.__path__[0])
    # Normalize the paths in the BOOST_BUILD_PATH.
    # This allows for using startswith() to determine
    # if a path is a subdirectory of the b2 root_path.
    paths = [os.path.normpath(p) for p in self.manager.boost_build_path()]
    # Remove all paths that start with b2's root_path.
    paths = [p for p in paths if not p.startswith(b2_path)]
    # Add any extra paths. (The previous code extended unconditionally,
    # which raised TypeError when 'extra_path' was left at its default
    # of None.)
    if extra_path:
        paths.extend(extra_path)

    try:
        # find_module is used so that the pyc's can be used;
        # an ImportError is raised if not found.
        f, location, description = imp.find_module(name, paths)
        mname = name + "__for_jamfile"
        self.loaded_tool_module_path_[mname] = location
        module = imp.load_module(mname, f, location, description)
        self.loaded_tool_modules_[name] = module
        return module
    except ImportError:
        # If the module is not found in the b2 package,
        # this error will be handled later.
        pass

    # The cache is created here due to possibly importing packages
    # that end up calling get_manager(), which might fail.
    if not self.__python_module_cache:
        self.__build_python_module_cache()

    underscore_name = name.replace('-', '_')
    # Check to see if the module is within the b2 package
    # and already loaded.
    mname = self.__python_module_cache.get(underscore_name)
    if mname in sys.modules:
        return sys.modules[mname]
    # Otherwise, if the module name is within the cache,
    # the module exists within the BOOST_BUILD_PATH;
    # load it.
    elif mname:
        # __import__ can be used here since the module
        # is guaranteed to be found under the `b2` namespace.
        __import__(mname)
        module = sys.modules[mname]
        self.loaded_tool_modules_[name] = module
        self.loaded_tool_module_path_[mname] = module.__file__
        return module

    self.manager.errors()("Cannot find module '%s'" % name)
# FIXME:
# Defines a Boost.Build extension project. Such extensions usually
# contain library targets and features that can be used by many people.
# Even though extensions are really projects, they can be initialize as
# a module would be with the "using" (project.project-rules.using)
# mechanism.
#rule extension ( id : options * : * )
#{
# # The caller is a standalone module for the extension.
# local mod = [ CALLER_MODULE ] ;
#
# # We need to do the rest within the extension module.
# module $(mod)
# {
# import path ;
#
# # Find the root project.
# local root-project = [ project.current ] ;
# root-project = [ $(root-project).project-module ] ;
# while
# [ project.attribute $(root-project) parent-module ] &&
# [ project.attribute $(root-project) parent-module ] != user-config
# {
# root-project = [ project.attribute $(root-project) parent-module ] ;
# }
#
# # Create the project data, and bring in the project rules
# # into the module.
# project.initialize $(__name__) :
# [ path.join [ project.attribute $(root-project) location ] ext $(1:L) ] ;
#
# # Create the project itself, i.e. the attributes.
# # All extensions are created in the "/ext" project space.
# project /ext/$(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ;
# local attributes = [ project.attributes $(__name__) ] ;
#
# # Inherit from the root project of whomever is defining us.
# project.inherit-attributes $(__name__) : $(root-project) ;
# $(attributes).set parent-module : $(root-project) : exact ;
# }
#}
class ProjectAttributes:
    """Class keeping all the attributes of a project.

    The standard attributes are 'id', "location", "project-root", "parent"
    "requirements", "default-build", "source-location" and "projects-to-build".
    """

    def __init__(self, manager, location, project_module):
        # manager -- the global Manager instance.
        # location -- directory of the project's Jamfile; may be empty
        #     for standalone projects such as user-config.
        # project_module -- name of the jamfile module these attributes
        #     belong to.
        self.manager = manager
        self.location = location
        self.project_module = project_module
        self.attributes = {}
        self.usage_requirements = None

    def set(self, attribute, specification, exact=False):
        """Set the named attribute from the specification given by the user.
        The value actually set may be different."""
        assert isinstance(attribute, basestring)
        assert isinstance(exact, (int, bool))
        if __debug__ and not exact:
            if attribute == 'requirements':
                assert (isinstance(specification, property_set.PropertySet)
                        or all(isinstance(s, basestring) for s in specification))
            elif attribute in (
                    'usage-requirements', 'default-build', 'source-location', 'build-dir', 'id'):
                assert is_iterable_typed(specification, basestring)
        elif __debug__:
            assert (
                isinstance(specification, (property_set.PropertySet, type(None), basestring))
                or all(isinstance(s, basestring) for s in specification)
            )
        if exact:
            # Caller takes full responsibility for the value's form.
            self.__dict__[attribute] = specification

        elif attribute == "requirements":
            self.requirements = property_set.refine_from_user_input(
                self.requirements, specification,
                self.project_module, self.location)

        elif attribute == "usage-requirements":
            # Strip conditions before validating: only "free" properties
            # may appear in usage requirements.
            unconditional = []
            for p in specification:
                split = property.split_conditional(p)
                if split:
                    unconditional.append(split[1])
                else:
                    unconditional.append(p)

            non_free = property.remove("free", unconditional)
            if non_free:
                get_manager().errors()("usage-requirements %s have non-free properties %s" \
                                       % (specification, non_free))

            t = property.translate_paths(
                property.create_from_strings(specification, allow_condition=True),
                self.location)

            existing = self.__dict__.get("usage-requirements")
            if existing:
                new = property_set.create(existing.all() + t)
            else:
                new = property_set.create(t)
            self.__dict__["usage-requirements"] = new

        elif attribute == "default-build":
            self.__dict__["default-build"] = property_set.create(specification)

        elif attribute == "source-location":
            source_location = []
            for path in specification:
                source_location.append(os.path.join(self.location, path))
            self.__dict__["source-location"] = source_location

        elif attribute == "build-dir":
            self.__dict__["build-dir"] = os.path.join(self.location, specification[0])

        elif attribute == "id":
            id = specification[0]
            if id[0] != '/':
                id = "/" + id
            self.manager.projects().register_id(id, self.project_module)
            self.__dict__["id"] = id

        elif not attribute in ["default-build", "location",
                               "source-location", "parent",
                               "projects-to-build", "project-root"]:
            self.manager.errors()(
                """Invalid project attribute '%s' specified
for project at '%s'""" % (attribute, self.location))
        else:
            self.__dict__[attribute] = specification

    def get(self, attribute):
        """Return the attribute's value; raises KeyError when unset."""
        assert isinstance(attribute, basestring)
        return self.__dict__[attribute]

    def getDefault(self, attribute, default):
        """Return the attribute's value, or 'default' when unset."""
        assert isinstance(attribute, basestring)
        assert isinstance(default, basestring) or default is None
        return self.__dict__.get(attribute, default)

    def dump(self):
        """Prints the project attributes (debugging aid).

        Fixed relative to the original, which crashed on the misspelled
        "debuild-build" key and on list.sort() returning None, raised
        KeyError for unset 'id'/'parent', and printed literal '%s'
        placeholders instead of formatting them.
        """
        # getDefault avoids KeyError for attributes that were never set.
        id = self.getDefault("id", None)
        if not id:
            id = "(none)"
        parent = self.getDefault("parent", None)
        if not parent:
            parent = "(none)"
        # NOTE(review): the original indexed id[0]/parent[0]; 'id' is
        # stored as a string by set(), so that printed only its first
        # character. 'parent' is printed as-is here -- confirm its form.
        # Single-argument print(...) behaves identically on Python 2 and 3.
        print("'%s'" % id)
        print("Parent project: %s" % parent)
        print("Requirements: %s" % self.get("requirements"))
        # NOTE(review): assumes 'default-build'/'source-location'/
        # 'projects-to-build' hold lists of strings.
        print("Default build: %s" % " ".join(self.get("default-build")))
        print("Source location: %s" % " ".join(self.get("source-location")))
        print("Projects to build: %s" %
              " ".join(sorted(self.get("projects-to-build"))))
class ProjectRules:
    """Class keeping all rules that are made available to Jamfile."""

    def __init__(self, registry):
        self.registry = registry
        self.manager_ = registry.manager
        # Rules registered at runtime via add_rule(), keyed by name.
        self.rules = {}
        # Rule methods defined directly on this class, minus the internal
        # helpers that must not be exposed to Jamfiles.
        self.local_names = [x for x in self.__class__.__dict__
                            if x not in ["__init__", "init_project", "add_rule",
                                         "error_reporting_wrapper", "add_rule_for_type", "reverse"]]
        self.all_names_ = [x for x in self.local_names]

    def _import_rule(self, bjam_module, name, callable_):
        # Expose 'callable_' to bjam as rule 'name' in 'bjam_module',
        # wrapped so exceptions are reported with Jamfile context.
        assert isinstance(bjam_module, basestring)
        assert isinstance(name, basestring)
        assert callable(callable_)
        if hasattr(callable_, "bjam_signature"):
            bjam.import_rule(bjam_module, name, self.make_wrapper(callable_), callable_.bjam_signature)
        else:
            bjam.import_rule(bjam_module, name, self.make_wrapper(callable_))

    def add_rule_for_type(self, type):
        # Register a main-target rule (e.g. "exe", "lib") for the given
        # target type; the generated rule forwards to create_typed_target.
        assert isinstance(type, basestring)
        rule_name = type.lower().replace("_", "-")

        @bjam_signature([['name'], ['sources', '*'], ['requirements', '*'],
                         ['default_build', '*'], ['usage_requirements', '*']])
        def xpto (name, sources=[], requirements=[], default_build=[], usage_requirements=[]):
            return self.manager_.targets().create_typed_target(
                type, self.registry.current(), name, sources,
                requirements, default_build, usage_requirements)

        self.add_rule(rule_name, xpto)

    def add_rule(self, name, callable_):
        """Register 'callable_' as rule 'name', usable from any Jamfile."""
        assert isinstance(name, basestring)
        assert callable(callable_)
        self.rules[name] = callable_
        self.all_names_.append(name)

        # Add new rule at global bjam scope. This might not be ideal,
        # added because if a jamroot does 'import foo' where foo calls
        # add_rule, we need to import new rule to jamroot scope, and
        # I'm lazy to do this now.
        self._import_rule("", name, callable_)

    def all_names(self):
        # All rule names: class-level rules plus those added at runtime.
        return self.all_names_

    def call_and_report_errors(self, callable_, *args, **kw):
        # Invoke 'callable_', reporting any exception together with the
        # Jamfile context in which it occurred; returns the call's result
        # (or None when an exception was reported).
        assert callable(callable_)
        result = None
        try:
            self.manager_.errors().push_jamfile_context()
            result = callable_(*args, **kw)
        except ExceptionWithUserContext, e:
            e.report()
        except Exception, e:
            try:
                self.manager_.errors().handle_stray_exception (e)
            except ExceptionWithUserContext, e:
                e.report()
        finally:
            self.manager_.errors().pop_jamfile_context()

        return result

    def make_wrapper(self, callable_):
        """Given a free-standing function 'callable', return a new
        callable that will call 'callable' and report all exceptions,
        using 'call_and_report_errors'."""
        assert callable(callable_)
        def wrapper(*args, **kw):
            return self.call_and_report_errors(callable_, *args, **kw)
        return wrapper

    def init_project(self, project_module, python_standalone=False):
        # Make all project rules visible inside 'project_module' --
        # either as attributes of a real Python module (standalone case)
        # or as bjam rules (regular Jamfile case).
        assert isinstance(project_module, basestring)
        assert isinstance(python_standalone, bool)
        if python_standalone:
            m = sys.modules[project_module]

            for n in self.local_names:
                if n != "import_":
                    setattr(m, n, getattr(self, n))

            for n in self.rules:
                setattr(m, n, self.rules[n])

            return

        for n in self.local_names:
            # Using 'getattr' here gives us a bound method,
            # while using self.__dict__[r] would give unbound one.
            v = getattr(self, n)
            if callable(v):
                # 'import_' is exported as 'import'; other names have
                # underscores translated to jam-style dashes.
                if n == "import_":
                    n = "import"
                else:
                    n = string.replace(n, "_", "-")

                self._import_rule(project_module, n, v)

        for n in self.rules:
            self._import_rule(project_module, n, self.rules[n])

    def project(self, *args):
        # Implements the 'project' Jamfile rule: records the project id
        # and attributes, honoring a global --build-dir override for
        # Jamroot projects.
        assert is_iterable(args) and all(is_iterable(arg) for arg in args)
        jamfile_module = self.registry.current().project_module()
        attributes = self.registry.attributes(jamfile_module)
        id = None
        if args and args[0]:
            id = args[0][0]
            args = args[1:]

        if id:
            attributes.set('id', [id])

        explicit_build_dir = None
        for a in args:
            if a:
                attributes.set(a[0], a[1:], exact=0)
                if a[0] == "build-dir":
                    explicit_build_dir = a[1]

        # If '--build-dir' is specified, change the build dir for the project.
        if self.registry.global_build_dir:
            location = attributes.get("location")
            # Project with empty location is 'standalone' project, like
            # user-config, or qt. It has no build dir.
            # If we try to set build dir for user-config, we'll then
            # try to inherit it, with either weird, or wrong consequences.
            if location and location == attributes.get("project-root"):
                # Re-read the project id, since it might have been changed in
                # the project's attributes.
                id = attributes.get('id')

                # This is Jamroot.
                if id:
                    if explicit_build_dir and os.path.isabs(explicit_build_dir):
                        self.registry.manager.errors()(
                            """Absolute directory specified via 'build-dir' project attribute
Don't know how to combine that with the --build-dir option.""")

                    rid = id
                    if rid[0] == '/':
                        rid = rid[1:]

                    p = os.path.join(self.registry.global_build_dir, rid)
                    if explicit_build_dir:
                        p = os.path.join(p, explicit_build_dir)
                    attributes.set("build-dir", p, exact=1)
            elif explicit_build_dir:
                self.registry.manager.errors()(
                    """When --build-dir is specified, the 'build-dir'
attribute is allowed only for top-level 'project' invocations""")

    def constant(self, name, value):
        """Declare and set a project global constant.
        Project global constants are normal variables but should
        not be changed. They are applied to every child Jamfile."""
        assert is_iterable_typed(name, basestring)
        assert is_iterable_typed(value, basestring)
        self.registry.current().add_constant(name[0], value)

    def path_constant(self, name, value):
        """Declare and set a project global constant, whose value is a path. The
        path is adjusted to be relative to the invocation directory. The given
        value path is taken to be either absolute, or relative to this project
        root."""
        assert is_iterable_typed(name, basestring)
        assert is_iterable_typed(value, basestring)
        if len(value) > 1:
            # NOTE(review): every other error report in this class goes
            # through manager.errors(); 'error()' here looks like a typo --
            # confirm against the Manager interface.
            self.registry.manager.error()("path constant should have one element")
        self.registry.current().add_constant(name[0], value[0], path=1)

    def use_project(self, id, where):
        # See comment in 'load' for explanation why we record the
        # parameters as opposed to loading the project now.
        assert is_iterable_typed(id, basestring)
        assert is_iterable_typed(where, basestring)
        m = self.registry.current().project_module()
        self.registry.used_projects[m].append((id[0], where[0]))

    def build_project(self, dir):
        # Append 'dir' to the list of subprojects built with this one.
        assert is_iterable_typed(dir, basestring)
        jamfile_module = self.registry.current().project_module()
        attributes = self.registry.attributes(jamfile_module)
        now = attributes.get("projects-to-build")
        attributes.set("projects-to-build", now + dir, exact=True)

    def explicit(self, target_names):
        # Marked targets are built only when explicitly requested.
        assert is_iterable_typed(target_names, basestring)
        self.registry.current().mark_targets_as_explicit(target_names)

    def always(self, target_names):
        assert is_iterable_typed(target_names, basestring)
        # NOTE(review): 'mark_targets_as_alays' looks like a typo for
        # 'mark_targets_as_always' -- confirm against the ProjectTarget
        # API before renaming.
        self.registry.current().mark_targets_as_alays(target_names)

    def glob(self, wildcards, excludes=None):
        # Expand wildcards relative to the project's source directory.
        assert is_iterable_typed(wildcards, basestring)
        assert is_iterable_typed(excludes, basestring) or excludes is None
        return self.registry.glob_internal(self.registry.current(),
                                           wildcards, excludes, "glob")

    def glob_tree(self, wildcards, excludes=None):
        # Like glob(), but recursive; patterns must be plain file names
        # without any directory component.
        assert is_iterable_typed(wildcards, basestring)
        assert is_iterable_typed(excludes, basestring) or excludes is None
        bad = 0
        for p in wildcards:
            if os.path.dirname(p):
                bad = 1

        if excludes:
            for p in excludes:
                if os.path.dirname(p):
                    bad = 1

        if bad:
            self.registry.manager.errors()(
                "The patterns to 'glob-tree' may not include directory")
        return self.registry.glob_internal(self.registry.current(),
                                           wildcards, excludes, "glob_tree")

    def using(self, toolset, *args):
        # The module referred by 'using' can be placed in
        # the same directory as Jamfile, and the user
        # will expect the module to be found even though
        # the directory is not in BOOST_BUILD_PATH.
        # So temporary change the search path.
        assert is_iterable_typed(toolset, basestring)
        current = self.registry.current()
        location = current.get('location')

        m = self.registry.load_module(toolset[0], [location])
        if not m.__dict__.has_key("init"):
            self.registry.manager.errors()(
                "Tool module '%s' does not define the 'init' method" % toolset[0])
        m.init(*args)

        # The above might have clobbered .current-project. Restore the correct
        # value.
        self.registry.set_current(current)

    def import_(self, name, names_to_import=None, local_names=None):
        # Implements the Jamfile 'import' rule: load a Python module and
        # expose its callables as qualified rules (and optionally under
        # 'local_names') in the current jamfile module.
        assert is_iterable_typed(name, basestring)
        assert is_iterable_typed(names_to_import, basestring) or names_to_import is None
        assert is_iterable_typed(local_names, basestring) or local_names is None
        name = name[0]
        py_name = name
        if py_name == "os":
            # 'os' would clash with the standard library module;
            # the Jamfile helper is named 'os_j' instead.
            py_name = "os_j"
        jamfile_module = self.registry.current().project_module()
        attributes = self.registry.attributes(jamfile_module)
        location = attributes.get("location")

        saved = self.registry.current()

        m = self.registry.load_module(py_name, [location])

        for f in m.__dict__:
            v = m.__dict__[f]
            f = f.replace("_", "-")
            if callable(v):
                qn = name + "." + f
                self._import_rule(jamfile_module, qn, v)
                record_jam_to_value_mapping(qualify_jam_action(qn, jamfile_module), v)

        if names_to_import:
            if not local_names:
                local_names = names_to_import

            if len(names_to_import) != len(local_names):
                self.registry.manager.errors()(
                    """The number of names to import and local names do not match.""")

            for n, l in zip(names_to_import, local_names):
                self._import_rule(jamfile_module, l, m.__dict__[n])

        self.registry.set_current(saved)

    def conditional(self, condition, requirements):
        """Calculates conditional requirements for multiple requirements
        at once. This is a shorthand to reduce duplication and to
        keep an inline declarative syntax. For example:

            lib x : x.cpp : [ conditional <toolset>gcc <variant>debug :
                <define>DEBUG_EXCEPTION <define>DEBUG_TRACE ] ;
        """
        assert is_iterable_typed(condition, basestring)
        assert is_iterable_typed(requirements, basestring)
        c = string.join(condition, ",")
        if c.find(":") != -1:
            # The condition already carries a ':' separator.
            return [c + r for r in requirements]
        else:
            return [c + ":" + r for r in requirements]

    def option(self, name, value):
        # Set a global build option; only allowed in configuration modules.
        assert is_iterable(name) and isinstance(name[0], basestring)
        assert is_iterable(value) and isinstance(value[0], basestring)
        name = name[0]
        if not name in ["site-config", "user-config", "project-config"]:
            get_manager().errors()("The 'option' rule may be used only in site-config or user-config")

        option.set(name, value[0])
| apache-2.0 |
UTSA-ICS/keystone-SID | keystone/contrib/revoke/controllers.py | 5 | 1810 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone.common import controller
from keystone.common import dependency
from keystone import exception
from keystone.openstack.common.gettextutils import _
from keystone.openstack.common import timeutils
@dependency.requires('revoke_api')
class RevokeController(controller.V3Controller):
    """V3 controller exposing the token-revocation event list."""

    @controller.protected()
    def list_revoke_events(self, context):
        """Return revocation events, optionally limited to those newer
        than the ISO-8601 timestamp in the 'since' query parameter."""
        since = context['query_string'].get('since')
        last_fetch = None
        if since:
            try:
                last_fetch = timeutils.normalize_time(
                    timeutils.parse_isotime(since))
            except ValueError:
                raise exception.ValidationError(
                    message=_('invalid date format %s') % since)
        events = self.revoke_api.get_events(last_fetch=last_fetch)
        # Build the links by hand as the standard controller calls require ids
        event_dicts = [event.to_dict() for event in events]
        self_link = RevokeController.base_url(
            context, path=context['path']) + '/events'
        return {'events': event_dicts,
                'links': {'next': None,
                          'self': self_link,
                          'previous': None}}
| apache-2.0 |
apache/incubator-eagle | eagle-external/hadoop_jmx_collector/hadoop_ha_checker.py | 4 | 5879 | # !/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from metric_collector import MetricCollector, JmxReader, YarnWSReader, Runner
import logging,socket,string
class HadoopNNHAChecker(MetricCollector):
    """Collects namenode HA-state metrics: for every host configured with
    component "namenode", emits total/active/standby/failed counts."""

    def run(self):
        hosts = []
        host_name_list = []
        for input in self.config["input"]:
            # Default to the local machine when no host is configured.
            if "host" not in input:
                input["host"] = socket.getfqdn()
            if input.get("component") == "namenode":
                hosts.append(input)
                host_name_list.append(input["host"])

        if not hosts:
            # Fixed log wording ("non hosts" -> "no hosts").
            logging.warn("no hosts are configured as 'namenode' in 'input' config, exit")
            return

        logging.info("Checking namenode HA: " + str(hosts))

        total_count = len(hosts)
        all_hosts_name = ",".join(host_name_list)

        self.collect({
            "host": all_hosts_name,
            "component": "namenode",
            "metric": "hadoop.namenode.hastate.total.count",
            "value": total_count
        })

        active_count = 0
        standby_count = 0
        failed_count = 0

        for host in hosts:
            try:
                bean = JmxReader(host["host"], host["port"], host["https"]).open().get_jmx_bean_by_name(
                    "Hadoop:service=NameNode,name=FSNamesystem")
                if not bean:
                    # The original only logged here and then crashed on the
                    # null value below (which the except handler happened to
                    # count as failed); fail the host explicitly instead.
                    logging.error("JMX Bean[Hadoop:service=NameNode,name=FSNamesystem] is null from " + host["host"])
                    raise Exception("JMX bean is null from " + host["host"])
                if "tag.HAState" in bean:
                    logging.debug(str(host) + " is " + bean["tag.HAState"])
                    if bean["tag.HAState"] == "active":
                        active_count += 1
                    else:
                        standby_count += 1
                else:
                    logging.info("'tag.HAState' not found from jmx of " + host["host"] + ":" + host["port"])
            except Exception:
                logging.exception("failed to read jmx from " + host["host"] + ":" + host["port"])
                failed_count += 1

        self.collect({
            "host": all_hosts_name,
            "component": "namenode",
            "metric": "hadoop.namenode.hastate.active.count",
            "value": active_count
        })

        self.collect({
            "host": all_hosts_name,
            "component": "namenode",
            "metric": "hadoop.namenode.hastate.standby.count",
            "value": standby_count
        })

        self.collect({
            "host": all_hosts_name,
            "component": "namenode",
            "metric": "hadoop.namenode.hastate.failed.count",
            "value": failed_count
        })
class HadoopRMHAChecker(MetricCollector):
    """Collects resource manager HA-state metrics: total/active/standby/
    failed counts over hosts configured with component "resourcemanager"."""

    def run(self):
        hosts = []
        all_hosts = []
        for input in self.config["input"]:
            # Default to the local machine when no host is configured.
            if "host" not in input:
                input["host"] = socket.getfqdn()
            if input.get("component") == "resourcemanager":
                hosts.append(input)
                all_hosts.append(input["host"])

        if not hosts:
            # Fixed log wording ("Non hosts" -> "No hosts").
            logging.warn("No hosts are configured as 'resourcemanager' in 'input' config, exit")
            return

        logging.info("Checking resource manager HA: " + str(hosts))

        total_count = len(hosts)
        all_hosts_name = ",".join(all_hosts)

        self.collect({
            "host": all_hosts_name,
            "component": "resourcemanager",
            "metric": "hadoop.resourcemanager.hastate.total.count",
            "value": total_count
        })

        active_count = 0
        standby_count = 0
        failed_count = 0

        for host in hosts:
            try:
                cluster_info = YarnWSReader(host["host"], host["port"], host["https"]).read_cluster_info()
                if not cluster_info:
                    logging.error("Cluster info is null from web service of " + host["host"])
                    raise Exception("cluster info is null from " + host["host"])
                if cluster_info["clusterInfo"]["haState"] == "ACTIVE":
                    active_count += 1
                else:
                    standby_count += 1
            except Exception:
                # logging.exception keeps the traceback, matching the
                # namenode checker above.
                logging.exception("Failed to read yarn ws from " + str(host))
                failed_count += 1

        self.collect({
            "host": all_hosts_name,
            "component": "resourcemanager",
            "metric": "hadoop.resourcemanager.hastate.active.count",
            "value": active_count
        })

        self.collect({
            "host": all_hosts_name,
            "component": "resourcemanager",
            "metric": "hadoop.resourcemanager.hastate.standby.count",
            "value": standby_count
        })

        self.collect({
            "host": all_hosts_name,
            "component": "resourcemanager",
            "metric": "hadoop.resourcemanager.hastate.failed.count",
            "value": failed_count
        })
if __name__ == '__main__':
Runner.run(HadoopNNHAChecker(), HadoopRMHAChecker()) | apache-2.0 |
soerendip42/rdkit | Code/SimDivPickers/Wrap/testPickers.py | 3 | 5424 | from rdkit import RDConfig
import unittest,os
from rdkit.SimDivFilters import rdSimDivPickers
from rdkit.DataManip.Metric import rdMetricMatrixCalc as rdmmc
import numpy
import random
class TestCase(unittest.TestCase):
def setUp(self):
    # Build a random 2-D point cloud and its condensed Euclidean
    # distance matrix, shared by the picker tests below.
    self.n = 1000  # number of points
    self.m = 80    # number of points to pick
    self.d = 2     # dimensionality
    self.dataPts = numpy.zeros((self.n, self.d), 'd')
    for i in range(self.n):
        for j in range(self.d):
            self.dataPts[i, j] = random.random()
    self.dMat = rdmmc.GetEuclideanDistMat(self.dataPts)
def test0MaxMin(self):
    """MaxMinPicker: Pick and LazyPick agree, seed picks are honored,
    and out-of-range seed indices are rejected."""
    pkr = rdSimDivPickers.MaxMinPicker()
    maxmin = pkr.Pick(self.dMat, self.n, self.m, (886, 112))
    # The seed picks must come back first, in order.
    self.assertEqual(maxmin[0], 886)
    self.assertEqual(maxmin[1], 112)

    def func(i, j):
        # Distance callback for LazyPick: index into the condensed
        # (upper-triangular) matrix produced by GetEuclideanDistMat.
        if i == j:
            return 0.0
        if i < j:
            j, i = i, j
        return self.dMat[i * (i - 1) // 2 + j]

    lmaxmin = pkr.LazyPick(func, self.n, self.m, (886, 112))
    self.assertEqual(list(lmaxmin), list(maxmin))
    # Same result with the distance cache disabled.
    lmaxmin = pkr.LazyPick(func, self.n, self.m, (886, 112), useCache=False)
    self.assertEqual(list(lmaxmin), list(maxmin))
    # Seed indices outside [0, n) must raise.
    self.assertRaises(ValueError, lambda: pkr.Pick(self.dMat, self.n, self.m, (1012,)))
    self.assertRaises(ValueError, lambda: pkr.Pick(self.dMat, self.n, self.m, (-1,)))
    # Unseeded picks also work.
    maxmin = pkr.Pick(self.dMat, self.n, self.m)
    self.assertTrue(maxmin)
    lmaxmin = pkr.LazyPick(func, self.n, self.m)
    self.assertTrue(lmaxmin)
def test1HierarchPick(self) :
fname = os.path.join(RDConfig.RDBaseDir,'Code','SimDivPickers','Wrap','test_data','points.csv')
with open(fname) as infil:
lines = infil.readlines()
self.dataPts = numpy.zeros((len(lines), 2), 'd')
labels = []
i = 0
for line in lines :
tlst = line.strip().split(',')
self.dataPts[i, 0] = float(tlst[1])
self.dataPts[i, 1] = float(tlst[2])
labels.append(int(tlst[3]))
i += 1
self.dMat = rdmmc.GetEuclideanDistMat(self.dataPts)
pkr = rdSimDivPickers.HierarchicalClusterPicker(rdSimDivPickers.ClusterMethod.WARD)
clusters = pkr.Cluster(self.dMat, i, 2)
# check that each of the clusters have the same label
for cl in clusters :
clbl = labels[cl[0]]
for id in cl:
assert clbl == labels[id]
hierarch = pkr.Pick(self.dMat, i, 2)
self.assertEqual(tuple(hierarch),(1,30))
def testIssue208(self) :
sz = 10
N=3
m = []
for i in range(sz):
for j in range(i+1,sz):
m.append(random.random())
m = numpy.array(m)
picker = rdSimDivPickers.HierarchicalClusterPicker(rdSimDivPickers.ClusterMethod.WARD)
p1 = list(picker.Pick(m,sz,N))
p1.sort()
p2 = list(picker.Pick(m,sz,N))
p2.sort()
self.assertEqual(p1,p2)
def testInts(self) :
""" make sure we can handle ints too """
sz = 10
N=3
m = []
for i in range(sz):
for j in range(i+1,sz):
m.append(int(100*random.random()))
m = numpy.array(m)
picker = rdSimDivPickers.HierarchicalClusterPicker(rdSimDivPickers.ClusterMethod.WARD)
p1 = list(picker.Pick(m,sz,N))
p1.sort()
p2 = list(picker.Pick(m,sz,N))
p2.sort()
self.assertEqual(p1,p2)
def testNonUniqueCrash(self) :
from rdkit import DataStructs
sz = 10
nbits=20
nBitsToSet=int(nbits*.3)
N=12
vs = []
for i in range(sz):
bv = DataStructs.ExplicitBitVect(nbits)
for j in range(nBitsToSet):
val= int(nbits*random.random())
bv.SetBit(val)
vs.append(bv)
vs.append(bv)
def taniFunc(i,j,bvs = vs):
d = 1-DataStructs.FingerprintSimilarity(bvs[i],bvs[j])
return d
picker = rdSimDivPickers.MaxMinPicker()
try:
mm1 = picker.LazyPick(taniFunc,len(vs),N)
except:
ok=False
else:
ok=True
self.assertTrue(ok)
self.assertEqual(len(mm1),N)
picker = None
picker = rdSimDivPickers.MaxMinPicker()
try:
mm2 = picker.LazyBitVectorPick(vs,len(vs),N)
except:
ok=False
else:
ok=True
self.assertTrue(ok)
self.assertEqual(len(mm2),N)
self.assertEqual(tuple(mm2),tuple(mm1))
picker = None
ds = []
nvs = len(vs)
for i in range(nvs):
for j in range(i+1,nvs):
d = taniFunc(i,j)
ds.append(d)
m = numpy.array(ds)
picker = rdSimDivPickers.HierarchicalClusterPicker(rdSimDivPickers.ClusterMethod.WARD)
p1 = list(picker.Pick(m,nvs,N))
def testBitVectorMaxMin(self):
from rdkit import DataStructs
sz = 100
nbits=200
nBitsToSet=int(nbits*.1)
N=10
vs = []
for i in range(sz):
bv = DataStructs.ExplicitBitVect(nbits)
for j in range(nBitsToSet):
val= int(nbits*random.random())
bv.SetBit(val)
vs.append(bv)
def func(i,j,bvs = vs):
d = DataStructs.TanimotoSimilarity(bvs[i],bvs[j],returnDistance=True)
return d
picker = rdSimDivPickers.MaxMinPicker()
mm1 = picker.LazyPick(func,len(vs),N)
self.assertEqual(len(mm1),N)
mm2 = picker.LazyPick(func,len(vs),N,useCache=False)
self.assertEqual(len(mm2),N)
self.assertEqual(list(mm1),list(mm2))
mm2 = picker.LazyBitVectorPick(vs,len(vs),N)
self.assertEqual(len(mm2),N)
self.assertEqual(list(mm1),list(mm2))
mm2 = picker.LazyBitVectorPick(vs,len(vs),N,useCache=False)
self.assertEqual(len(mm2),N)
self.assertEqual(list(mm1),list(mm2))
# Allow running this test module directly: `python testPickers.py`.
if __name__ == '__main__':
  unittest.main()
| bsd-3-clause |
rosmo/ansible | lib/ansible/modules/cloud/amazon/aws_config_recorder.py | 27 | 7722 | #!/usr/bin/python
# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_config_recorder
short_description: Manage AWS Config Recorders
description:
- Module manages AWS Config configuration recorder settings
version_added: "2.6"
requirements: [ 'botocore', 'boto3' ]
author:
- "Aaron Smith (@slapula)"
options:
name:
description:
- The name of the AWS Config resource.
required: true
state:
description:
- Whether the Config rule should be present or absent.
default: present
choices: ['present', 'absent']
role_arn:
description:
- Amazon Resource Name (ARN) of the IAM role used to describe the AWS resources associated with the account.
- Required when state=present
recording_group:
description:
- Specifies the types of AWS resources for which AWS Config records configuration changes.
- Required when state=present
suboptions:
all_supported:
description:
- Specifies whether AWS Config records configuration changes for every supported type of regional resource.
- If you set this option to `true`, when AWS Config adds support for a new type of regional resource, it starts
recording resources of that type automatically.
- If you set this option to `true`, you cannot enumerate a list of `resource_types`.
include_global_types:
description:
- Specifies whether AWS Config includes all supported types of global resources (for example, IAM resources)
with the resources that it records.
- Before you can set this option to `true`, you must set the allSupported option to `true`.
- If you set this option to `true`, when AWS Config adds support for a new type of global resource, it starts recording
resources of that type automatically.
- The configuration details for any global resource are the same in all regions. To prevent duplicate configuration items,
you should consider customizing AWS Config in only one region to record global resources.
resource_types:
description:
- A list that specifies the types of AWS resources for which AWS Config records configuration changes (for example,
`AWS::EC2::Instance` or `AWS::CloudTrail::Trail`).
- Before you can set this option to `true`, you must set the `all_supported` option to `false`.
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: Create Configuration Recorder for AWS Config
aws_config_recorder:
name: test_configuration_recorder
state: present
role_arn: 'arn:aws:iam::123456789012:role/AwsConfigRecorder'
recording_group:
all_supported: true
include_global_types: true
'''
RETURN = '''#'''
try:
import botocore
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # handled by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, AWSRetry
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict
def resource_exists(client, module, params):
    """Return the existing configuration recorder's description, or None.

    :param client: boto3 `config` client
    :param module: AnsibleAWSModule (used to fail the play on API errors)
    :param params: dict holding at least the recorder `name`
    """
    try:
        recorder = client.describe_configuration_recorders(
            ConfigurationRecorderNames=[params['name']]
        )
        return recorder['ConfigurationRecorders'][0]
    except is_boto3_error_code('NoSuchConfigurationRecorderException'):
        # Recorder not found: signal absence with an implicit None.
        return
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e)
def create_resource(client, module, params, result):
    """Create the configuration recorder and record the new state in `result`.

    Fails the Ansible run on any AWS error; otherwise marks `changed` and
    stores the recorder description (snake_cased) under `result['recorder']`.
    """
    try:
        # The API response carries no useful payload, so it is not kept
        # (the original bound it to an unused `response` local).
        client.put_configuration_recorder(
            ConfigurationRecorder=params
        )
        result['changed'] = True
        result['recorder'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
        return result
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't create AWS Config configuration recorder")
def update_resource(client, module, params, result):
    """Re-apply the recorder configuration when it differs from AWS's view.

    NOTE(review): the comparison is a plain dict equality against the API
    response, so any extra key AWS returns forces an update call -- confirm
    that is intended.
    """
    current_params = client.describe_configuration_recorders(
        ConfigurationRecorderNames=[params['name']]
    )
    if params != current_params['ConfigurationRecorders'][0]:
        try:
            # Response payload is not useful; drop the unused `response` local.
            client.put_configuration_recorder(
                ConfigurationRecorder=params
            )
            result['changed'] = True
            result['recorder'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
            return result
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't update AWS Config configuration recorder")
def delete_resource(client, module, params, result):
    """Delete the configuration recorder and flag the change in `result`."""
    try:
        # Response payload is not useful; drop the unused `response` local.
        client.delete_configuration_recorder(
            ConfigurationRecorderName=params['name']
        )
        result['changed'] = True
        return result
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't delete AWS Config configuration recorder")
def main():
    """Ansible entry point: reconcile the recorder with the requested state."""
    module = AnsibleAWSModule(
        argument_spec={
            'name': dict(type='str', required=True),
            'state': dict(type='str', choices=['present', 'absent'], default='present'),
            'role_arn': dict(type='str'),
            'recording_group': dict(type='dict'),
        },
        supports_check_mode=False,
        required_if=[
            ('state', 'present', ['role_arn', 'recording_group']),
        ],
    )
    result = {
        'changed': False
    }
    name = module.params.get('name')
    state = module.params.get('state')
    # Translate the snake_case Ansible options into the camelCase shape the
    # AWS Config API expects; only explicitly provided options are included.
    params = {}
    if name:
        params['name'] = name
    if module.params.get('role_arn'):
        params['roleARN'] = module.params.get('role_arn')
    if module.params.get('recording_group'):
        params['recordingGroup'] = {}
        if module.params.get('recording_group').get('all_supported') is not None:
            params['recordingGroup'].update({
                'allSupported': module.params.get('recording_group').get('all_supported')
            })
        if module.params.get('recording_group').get('include_global_types') is not None:
            params['recordingGroup'].update({
                'includeGlobalResourceTypes': module.params.get('recording_group').get('include_global_types')
            })
        if module.params.get('recording_group').get('resource_types'):
            params['recordingGroup'].update({
                'resourceTypes': module.params.get('recording_group').get('resource_types')
            })
    client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
    resource_status = resource_exists(client, module, params)
    # present + missing -> create; present + existing -> update; absent + existing -> delete.
    if state == 'present':
        if not resource_status:
            create_resource(client, module, params, result)
        if resource_status:
            update_resource(client, module, params, result)
    if state == 'absent':
        if resource_status:
            delete_resource(client, module, params, result)
    module.exit_json(changed=result['changed'])
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
qqzwc/XX-Net | code/default/python27/1.0/lib/stat.py | 319 | 1842 | """Constants/functions for interpreting results of os.stat() and os.lstat().
Suggested usage: from stat import *
"""
# Indices for stat struct members in the tuple returned by os.stat()
ST_MODE = 0   # protection bits / file type
ST_INO = 1    # inode number
ST_DEV = 2    # device
ST_NLINK = 3  # number of hard links
ST_UID = 4    # user id of owner
ST_GID = 5    # group id of owner
ST_SIZE = 6   # size in bytes
ST_ATIME = 7  # time of last access
ST_MTIME = 8  # time of last modification
ST_CTIME = 9  # time of last status change (platform dependent)
# Extract bits from the mode

def S_IMODE(mode):
    """Return the portion of the file's mode that can be set by os.chmod()."""
    # 0o7777: permission bits plus setuid/setgid/sticky.  The `0o` octal form
    # is valid on Python 2.6+ and 3.x (the old `07777` form is a SyntaxError
    # under Python 3).
    return mode & 0o7777

def S_IFMT(mode):
    """Return the portion of the file's mode that describes the file type."""
    return mode & 0o170000

# Constants used as S_IFMT() for various file types
# (not all are implemented on all systems)
S_IFDIR = 0o040000   # directory
S_IFCHR = 0o020000   # character device
S_IFBLK = 0o060000   # block device
S_IFREG = 0o100000   # regular file
S_IFIFO = 0o010000   # fifo (named pipe)
S_IFLNK = 0o120000   # symbolic link
S_IFSOCK = 0o140000  # socket file

# Functions to test for each file type

def S_ISDIR(mode):
    """Return True if mode is from a directory."""
    return S_IFMT(mode) == S_IFDIR

def S_ISCHR(mode):
    """Return True if mode is from a character special device file."""
    return S_IFMT(mode) == S_IFCHR

def S_ISBLK(mode):
    """Return True if mode is from a block special device file."""
    return S_IFMT(mode) == S_IFBLK

def S_ISREG(mode):
    """Return True if mode is from a regular file."""
    return S_IFMT(mode) == S_IFREG

def S_ISFIFO(mode):
    """Return True if mode is from a FIFO (named pipe)."""
    return S_IFMT(mode) == S_IFIFO

def S_ISLNK(mode):
    """Return True if mode is from a symbolic link."""
    return S_IFMT(mode) == S_IFLNK

def S_ISSOCK(mode):
    """Return True if mode is from a socket."""
    return S_IFMT(mode) == S_IFSOCK
# Names for permission bits.  Rewritten with the `0o` octal prefix, which is
# valid on Python 2.6+ and 3.x (the bare-leading-zero form fails on Python 3).
S_ISUID = 0o4000   # set UID bit
S_ISGID = 0o2000   # set GID bit
S_ENFMT = S_ISGID  # file locking enforcement
S_ISVTX = 0o1000   # sticky bit
S_IREAD = 0o0400   # Unix V7 synonym for S_IRUSR
S_IWRITE = 0o0200  # Unix V7 synonym for S_IWUSR
S_IEXEC = 0o0100   # Unix V7 synonym for S_IXUSR
S_IRWXU = 0o0700   # mask for owner permissions
S_IRUSR = 0o0400   # read by owner
S_IWUSR = 0o0200   # write by owner
S_IXUSR = 0o0100   # execute by owner
S_IRWXG = 0o0070   # mask for group permissions
S_IRGRP = 0o0040   # read by group
S_IWGRP = 0o0020   # write by group
S_IXGRP = 0o0010   # execute by group
S_IRWXO = 0o0007   # mask for others (not in group) permissions
S_IROTH = 0o0004   # read by others
S_IWOTH = 0o0002   # write by others
S_IXOTH = 0o0001   # execute by others
# Names for file flags (BSD/OS X chflags-style; the UF_* flags are
# user-settable, the SF_* flags are settable only by the superuser)
UF_NODUMP = 0x00000001      # do not dump the file
UF_IMMUTABLE = 0x00000002   # file may not be changed
UF_APPEND = 0x00000004      # file may only be appended to
UF_OPAQUE = 0x00000008      # directory is opaque when viewed through a union stack
UF_NOUNLINK = 0x00000010    # file may not be renamed or deleted
UF_COMPRESSED = 0x00000020 # OS X: file is hfs-compressed
UF_HIDDEN = 0x00008000 # OS X: file should not be displayed
SF_ARCHIVED = 0x00010000    # file may be archived
SF_IMMUTABLE = 0x00020000   # file may not be changed
SF_APPEND = 0x00040000      # file may only be appended to
SF_NOUNLINK = 0x00100000    # file may not be renamed or deleted
SF_SNAPSHOT = 0x00200000    # file is a snapshot file
| bsd-2-clause |
tuckbloor/node.js-chat | node_modules/socket.io/node_modules/socket.io-client/node_modules/engine.io-client/node_modules/engine.io-parser/node_modules/utf8/tests/generate-test-data.py | 2214 | 1347 | #!/usr/bin/env python
import re
import json
# http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
	"""Return the character (or UTF-16 surrogate pair) for a code point.

	BMP code points map to a single character; supplementary-plane code
	points are returned as an explicit surrogate pair, mirroring
	JavaScript's string model (see the notes linked in the module header).
	Out-of-range values yield the string 'Error'.
	"""
	# Python 3 has no unichr(); chr() is its exact replacement there.
	try:
		_chr = unichr
	except NameError:
		_chr = chr
	if 0x0000 <= codePoint <= 0xFFFF:
		return _chr(codePoint)
	elif 0x010000 <= codePoint <= 0x10FFFF:
		# http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
		highSurrogate = (codePoint - 0x10000) // 0x400 + 0xD800
		lowSurrogate = (codePoint - 0x10000) % 0x400 + 0xDC00
		return _chr(highSurrogate) + _chr(lowSurrogate)
	else:
		return 'Error'
def hexify(codePoint):
	"""Format a code point as a zero-padded ``U+XXXXXX`` label."""
	digits = hex(codePoint)[2:].upper()
	return 'U+' + digits.zfill(6)
def writeFile(filename, contents):
	"""Write `contents` to `filename`, normalized to end with exactly one newline."""
	# `print filename` (the Python 2 statement form) is a SyntaxError on
	# Python 3; the call form below behaves identically for one argument.
	print(filename)
	with open(filename, 'w') as f:
		f.write(contents.strip() + '\n')
# Walk every Unicode code point (U+0000 .. U+10FFFF) and record, for each,
# the symbol itself and its UTF-8 byte sequence (held as a latin1 string so
# each char maps to one byte).  Python 2 semantics assumed throughout.
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
	symbol = unisymbol(codePoint)
	# http://stackoverflow.com/a/17199950/96656
	bytes = symbol.encode('utf8').decode('latin1')
	data.append({
		'codePoint': codePoint,
		'decoded': symbol,
		'encoded': bytes
	});
jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation
jsonData = jsonData.replace(' ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
	r'\\u([a-fA-F0-9]{4})',
	lambda match: r'\u{}'.format(match.group(1).upper()),
	jsonData
)
writeFile('data.json', jsonData)
| mit |
bbc/kamaelia | Sketches/PO/KamPlanet/introspector.py | 3 | 1823 | # -*- coding: utf-8 -*-
# http://yeoldeclue.com/cgi-bin/blog/blog.cgi?rm=viewpost&nodeid=1200236224
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import Axon
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Util.Console import ConsoleEchoer
class PeriodicWakeup(Axon.ThreadedComponent.threadedcomponent):
    """Threaded component emitting the string "tick" on its outbox every `interval` seconds."""
    interval = 1
    def main(self):
        # Runs on its own thread, so the blocking sleep is acceptable here.
        while True:
            time.sleep(self.interval)
            self.send("tick", "outbox")
class WakeableIntrospector(Axon.Component.component):
    def main(self):
        """Generator loop: dump the sorted scheduler thread names, then sleep until poked."""
        while 1:
            # Snapshot and sort the names of everything the scheduler runs.
            Q = [ q.name for q in self.scheduler.listAllThreads() ]
            Q.sort()
            self.send("\n*debug* THREADS"+ str(Q)+"\n", "outbox")
            self.scheduler.debuggingon = False
            yield 1
            # Pause until any message arrives on the inbox...
            while not self.dataReady("inbox"):
                self.pause()
                yield 1
            # ...then drain it so the next wakeup needs a fresh message.
            while self.dataReady("inbox"):
                self.recv("inbox")
def activate():
    """Wire wakeup -> introspector -> console into a pipeline and activate it (non-blocking)."""
    Pipeline(
        PeriodicWakeup(),
        WakeableIntrospector(),
        ConsoleEchoer(),
    ).activate()
| apache-2.0 |
luismasuelli/python-server-cantrips | cantrips/protocol/messaging/formats.py | 1 | 10136 | from cantrips.features import Feature
from cantrips.types.exception import factory
from collections import namedtuple
from enum import Enum
import json
from cantrips.protocol.messaging import Message
_32bits = (1 << 32) - 1  # 0xFFFFFFFF: mask selecting the low 32 bits of a 64-bit command
class MsgPackFeature(Feature):
    # NOTE(review): Feature (project base class) presumably exposes the
    # public import_it() used below in this module, built on these two
    # hooks -- confirm against cantrips.features.
    @classmethod
    def _import_it(cls):
        """
        Imports the msgpack library.
        :returns: a (msgpack module, UnpackException class) pair.
        """
        import msgpack
        import msgpack.exceptions
        return msgpack, msgpack.exceptions.UnpackException
    @classmethod
    def _import_error_message(cls):
        """
        Message error for msgpack not found.
        """
        return "You need to install msgpack for this to work (pip install msgpack-python>=0.4.6)"
def _json_serializer():
"""
Returns an object with dumps() and loads() for json format.
"""
return json
def _msgpack_serializer():
"""
Returns an object with dumps() and loads() for msgpack format.
"""
return MsgPackFeature.import_it()[0]
def _json_serializer_exceptions():
"""
Returns a tuple containing only the json-related exceptions.
"""
return TypeError, ValueError
def _msgpack_serializer_exceptions():
"""
Returns a tuple containing only the MsgPack exceptions.
"""
return MsgPackFeature.import_it()[1],
def _split_string_command(command):
"""
Splits as dotted string. The code is the last part of the string.
E.g. a.b.c is splitted as a.b, c
"""
try:
a, b = command.rsplit('.', 1)
return a, b
except:
raise ValueError("Command to parse MUST be a dotted string")
def _split_integer_command(command):
    """Split a 64-bit integer command into its high and low 32-bit halves."""
    high = (command >> 32) & _32bits
    low = command & _32bits
    return high, low
def _join_string_command(ns, code):
"""
Joins two strings as a dot-separated string. E.g. a.b, c will be joined as a.b.c.
"""
return "%s.%s" % (ns, code)
def _join_integer_command(ns, code):
    """Pack two 32-bit integers into one 64-bit command (ns high, code low)."""
    high = (ns & _32bits) << 32
    low = code & _32bits
    return high | low
# Index 0 of each tuple serves the string (JSON) format and index 1 the
# integer (msgpack) format, aligned with Formats.FORMAT_STRING/FORMAT_INTEGER.
_JOINERS = (_join_string_command, _join_integer_command)
_SPLITTERS = (_split_string_command, _split_integer_command)
_BROKERS = (_json_serializer, _msgpack_serializer)
_EXCEPTIONS = (_json_serializer_exceptions, _msgpack_serializer_exceptions)
_MEMBER_NAMES = ('string', 'integer')
class Formats(int, Enum):
    """
    Parsing formats for messages. Intended:
    - 0 -> string -> JSON.
    - 1 -> integer -> MsgPack.
    """
    FORMAT_STRING = 0
    FORMAT_INTEGER = 1

    # NOTE: the original tried to memoize these properties with
    # `hasattr(self, '__split')`, but private-name mangling rewrote the
    # *assignment* to `_Formats__split` while hasattr checked the literal
    # '__split', so the cache test always failed and every access recomputed
    # anyway.  The lookups are trivial, so the dead cache is simply dropped.

    @property
    def split(self):
        """Function splitting a full command into (namespace, code)."""
        return _SPLITTERS[self.value]

    @property
    def join(self):
        """Function joining (namespace, code) back into a full command."""
        return _JOINERS[self.value]

    @property
    def member_name(self):
        """Name of the CommandSpec member matching this format."""
        return _MEMBER_NAMES[self.value]

    @property
    def broker(self):
        """Object exposing dumps()/loads() for this format."""
        return _BROKERS[self.value]()

    @property
    def exceptions(self):
        """Tuple of exceptions the broker may raise while loading."""
        return _EXCEPTIONS[self.value]()

    def spec_value(self, spec):
        """Return the member of `spec` matching this format (spec[value])."""
        return spec[self.value]
class CommandSpec(namedtuple('_CommandSpec', _MEMBER_NAMES)):
    """
    This class will be used to instantiate each namespace and code (they, together, conform a command),
    which can be specified by integer or by string (regardless the output format, either msgpack or json).
    However, a message *must* have *str* keyword arguments, so as_keyword() will always yield the string
    component.
    """
    def as_keyword(self):
        # Message keyword arguments must be str, so expose the string member.
        return self.string
ANY_COMMAND = CommandSpec(0xFFFFFFFF, '__any__')  # wildcard spec: reserved, never registrable
def _cannot_add_any_or_unknown(command):
    # Guard used by the registration paths: the wildcard must never be added
    # as a concrete namespace or code.
    if command == ANY_COMMAND:
        raise ValueError('Cannot add to translation ANY_COMMAND value as neither namespace or code')
class CommandNamespaceMap(namedtuple('_CommandNamespaceMap', ['translator', 'spec', 'map'])):
    """
    A (translator, spec, map) triple describing one command namespace: the
    owning Translator, the namespace's CommandSpec, and a dict mapping
    format-specific code values to their CommandSpec.
    """
    def __new__(cls, translator, ns_spec):
        # The per-namespace code map always starts empty.
        return super(CommandNamespaceMap, cls).__new__(cls, translator, ns_spec, {})
    def add_command(self, spec):
        """
        Adds a command to the map by its code. ANY_COMMAND cannot be translated with this method.
        :param spec: the command's CommandSpec.
        :returns: self (allows chaining).
        """
        if not self.translator:
            raise ValueError("Cannot add a command to a namespace map without translator")
        _cannot_add_any_or_unknown(spec)
        self.map[self.translator.format.spec_value(spec)] = spec
        return self
UNKNOWN_NAMESPACE_MAP = CommandNamespaceMap(None, None)  # sentinel for commands whose namespace is not registered
class Translator(object):
    """
    Stores a map of commands, according to the chosen command format.
    It keeps an inner mapping like {F_ : (C, {F_ : C})} where F_ is the
    appropriate received-format value (string or integer) while C is a
    CommandSpec instance.
    """

    Error = factory([
        'UNEXPECTED_BINARY',
        'UNEXPECTED_TEXT',
        'EXPECTED_MAP',
        'EXPECTED_MAP_WITH_CODE',
        'EXPECTED_ARGS_AS_LIST',
        'EXPECTED_KWARGS_AS_DICT',
        'EXPECTED_KWARGS_KEYS_AS_STRING',
        'UNKNOWN_COMMAND'
    ])

    def __init__(self, format):
        self.__format = format
        self.__map = {}

    @property
    def format(self):
        """The Formats member this translator speaks."""
        return self.__format

    def namespace(self, spec):
        """
        Adds a new namespace translation. ANY_COMMAND cannot be translated with this method.
        :param spec: A CommandSpec instance to add.
        :returns: The CommandNamespaceMap registered for that namespace
            (existing one if already present).
        """
        _cannot_add_any_or_unknown(spec)
        return self.__map.setdefault(self.format.spec_value(spec), CommandNamespaceMap(self, spec))

    def translate(self, full_command):
        """
        Breaks a full command into namespace and code. If either of the
        command parts is not known, KeyError will be raised.
        :param full_command: A raw value, according to the format.
        :returns: A tuple with (namespace, code) CommandSpecs.
        """
        namespace, code = self.format.split(full_command)
        namespace_map = self.__map.get(namespace, UNKNOWN_NAMESPACE_MAP)
        return namespace_map.spec, namespace_map.map[code]

    def untranslate(self, namespace, code):
        """
        The inverse of translate(full_command): joins two CommandSpecs back
        into a raw command value.
        :param namespace: A CommandSpec.
        :param code: A CommandSpec.
        :returns: string, or integer of 64 bits.
        """
        return self.format.join(self.format.spec_value(namespace), self.format.spec_value(code))

    def parse_data(self, data, binary=None):
        """
        Parses the incoming data and returns a Message instance.
        :param data: Message to be parsed (str or unicode or bytes data).
        :param binary: Tri-state flag for the expected input type:
            False -> text/json required, True -> binary/msgpack required,
            None -> whatever the in-use translator's format dictates.
        :returns: A parsed Message.
        :raises Translator.Error: on any format violation.
        """
        if binary is not None:
            if binary and self.format == Formats.FORMAT_STRING:
                raise self.Error("Binary parsing was requested, but this translator uses a JSON format",
                                 self.Error.UNEXPECTED_BINARY)
            # BUGFIX: this branch also compared against FORMAT_STRING, so a
            # JSON translator wrongly rejected text requests and a msgpack
            # translator never rejected them.
            if not binary and self.format == Formats.FORMAT_INTEGER:
                raise self.Error("Text parsing was requested, but this translator uses a MSGPACK format",
                                 self.Error.UNEXPECTED_TEXT)
        data = self.format.broker.loads(data)
        if not isinstance(data, dict):
            raise self.Error("Received data is not a valid map object (JSON literal / Msgpack Map)",
                             self.Error.EXPECTED_MAP)
        # BUGFIX: the original tested membership/indexing on the *builtin*
        # `dict` type ("'code' not in dict" / "dict['code']"), which raises
        # TypeError at runtime instead of inspecting the parsed payload.
        if 'code' not in data:
            raise self.Error("Received data has not a `code` member", self.Error.EXPECTED_MAP_WITH_CODE)
        try:
            ns, code = self.translate(data['code'])
        except KeyError:
            raise self.Error("Expected message with a known pair of namespace/code", self.Error.UNKNOWN_COMMAND)
        args = data.get('args', ())
        if not isinstance(args, (list, tuple)):
            raise self.Error("Expected message args as list", self.Error.EXPECTED_ARGS_AS_LIST)
        kwargs = data.get('kwargs', {})
        if not isinstance(kwargs, dict):
            raise self.Error("Expected message kwargs as dict", self.Error.EXPECTED_KWARGS_AS_DICT)
        try:
            return Message(ns, code, *args, **kwargs)
        # NOTE(review): Message presumably raises SyntaxError for non-string
        # kwargs keys (Python itself raises TypeError for f(**{1: 2})) --
        # confirm against cantrips.protocol.messaging.Message.
        except SyntaxError:
            raise self.Error("Expected message kwargs keys as string", self.Error.EXPECTED_KWARGS_KEYS_AS_STRING)

    def serialize(self, message):
        """
        Serializes the message data, ready to be sent.
        :param message: A Message instance.
        :returns: data to be sent (str or bytes, depending on the format).
        """
        return self.format.broker.dumps({
            'code': self.untranslate(message.code[0], message.code[1]),
            'args': message.args,
            'kwargs': message.kwargs
        })
class JSONTranslator(Translator):
    """
    A Translator preconfigured for the JSON (string-command) wire format.
    """
    def __init__(self):
        super(JSONTranslator, self).__init__(Formats.FORMAT_STRING)
class MsgPackTranslator(Translator):
    """
    A Translator preconfigured for the MsgPack (integer-command) wire format.
    """
    def __init__(self):
        super(MsgPackTranslator, self).__init__(Formats.FORMAT_INTEGER)
AnishShah/tensorflow | tensorflow/contrib/learn/python/learn/datasets/synthetic.py | 42 | 7451 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Synthetic dataset generators (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.datasets.base import Dataset
from tensorflow.python.util.deprecation import deprecated
@deprecated(None, 'Consider using synthetic datasets from scikits.learn.')
def circles(n_samples=100,
            noise=None,
            seed=None,
            factor=0.8,
            n_classes=2,
            *args,
            **kwargs):
  """Create concentric circles separated by a radius factor.
  Args:
    n_samples: int, number of datapoints to generate
    noise: float or None, standard deviation of the Gaussian noise added
    seed: int or None, seed for the noise
    factor: float, size factor of the inner circles with respect to the outer
      ones
    n_classes: int, number of classes to generate
  Returns:
    Shuffled features and labels for 'circles' synthetic dataset of type
    `base.Dataset`
  Note:
    The multi-class support might not work as expected if `noise` is enabled
  TODO:
    - Generation of unbalanced data
  Credit goes to (under BSD 3 clause):
    B. Thirion,
    G. Varoquaux,
    A. Gramfort,
    V. Michel,
    O. Grisel,
    G. Louppe,
    J. Nothman
  """
  if seed is not None:
    np.random.seed(seed)
  # Algo: 1) Generate initial circle, 2) For every class generate a smaller
  # radius circle (each class shrinks the base circle by `factor`).
  linspace = np.linspace(0, 2 * np.pi, n_samples // n_classes)
  circ_x = np.empty(0, dtype=np.int32)
  circ_y = np.empty(0, dtype=np.int32)
  base_cos = np.cos(linspace)
  base_sin = np.sin(linspace)
  y = np.empty(0, dtype=np.int32)
  for label in range(n_classes):
    circ_x = np.append(circ_x, base_cos)
    circ_y = np.append(circ_y, base_sin)
    # In-place shrink: the next class reuses the scaled-down circle.
    base_cos *= factor
    base_sin *= factor
    y = np.append(y, label * np.ones(n_samples // n_classes, dtype=np.int32))
  # Add more points if n_samples is not divisible by n_classes (unbalanced!)
  # NOTE(review): the extra points are placed at random angles on the unit
  # circle and always labeled 0, matching the "unbalanced" caveat above.
  extras = n_samples % n_classes
  circ_x = np.append(circ_x, np.cos(np.random.rand(extras) * 2 * np.pi))
  circ_y = np.append(circ_y, np.sin(np.random.rand(extras) * 2 * np.pi))
  y = np.append(y, np.zeros(extras, dtype=np.int32))
  # Reshape the features/labels
  X = np.vstack((circ_x, circ_y)).T
  y = np.hstack(y)
  # Shuffle the data
  indices = np.random.permutation(range(n_samples))
  if noise is not None:
    X += np.random.normal(scale=noise, size=X.shape)
  return Dataset(data=X[indices], target=y[indices])
@deprecated(None, 'Consider using synthetic datasets from scikits.learn.')
def spirals(n_samples=100,
            noise=None,
            seed=None,
            mode='archimedes',
            n_loops=2,
            *args,
            **kwargs):
  """Create two interleaved spirals (binary classification only).
  Args:
    n_samples: int, number of datapoints to generate
    noise: float or None, standard deviation of the Gaussian noise added
    seed: int or None, seed for the noise
    n_loops: int, number of spiral loops, doesn't play well with 'bernoulli'
    mode: str, how the spiral should be generated. Current implementations:
      'archimedes': a spiral with equal distances between branches
      'bernoulli': logarithmic spiral with branch distances increasing
      'fermat': a spiral with branch distances decreasing (sqrt)
  Returns:
    Shuffled features and labels for 'spirals' synthetic dataset of type
    `base.Dataset`
  Raises:
    ValueError: If the generation `mode` is not valid
  TODO:
    - Generation of unbalanced data
  """
  n_classes = 2  # I am not sure how to make it multiclass
  _modes = {
      'archimedes': _archimedes_spiral,
      'bernoulli': _bernoulli_spiral,
      'fermat': _fermat_spiral
  }
  if mode is None or mode not in _modes:
    raise ValueError('Cannot generate spiral with mode %s' % mode)
  if seed is not None:
    np.random.seed(seed)
  linspace = np.linspace(0, 2 * n_loops * np.pi, n_samples // n_classes)
  spir_x = np.empty(0, dtype=np.int32)
  spir_y = np.empty(0, dtype=np.int32)
  y = np.empty(0, dtype=np.int32)
  # The second class's spiral is the first rotated by pi (label * np.pi offset).
  for label in range(n_classes):
    base_cos, base_sin = _modes[mode](linspace, label * np.pi, *args, **kwargs)
    spir_x = np.append(spir_x, base_cos)
    spir_y = np.append(spir_y, base_sin)
    y = np.append(y, label * np.ones(n_samples // n_classes, dtype=np.int32))
  # Add more points if n_samples is not divisible by n_classes (unbalanced!)
  # NOTE(review): extras are generated at random angles and always labeled 0.
  extras = n_samples % n_classes
  if extras > 0:
    x_extra, y_extra = _modes[mode](np.random.rand(extras) * 2 * np.pi, *args,
                                    **kwargs)
    spir_x = np.append(spir_x, x_extra)
    spir_y = np.append(spir_y, y_extra)
    y = np.append(y, np.zeros(extras, dtype=np.int32))
  # Reshape the features/labels
  X = np.vstack((spir_x, spir_y)).T
  y = np.hstack(y)
  # Shuffle the data
  indices = np.random.permutation(range(n_samples))
  if noise is not None:
    X += np.random.normal(scale=noise, size=X.shape)
  return Dataset(data=X[indices], target=y[indices])
def _archimedes_spiral(theta, theta_offset=0., *args, **kwargs):
"""Return Archimedes spiral
Args:
theta: array-like, angles from polar coordinates to be converted
theta_offset: float, angle offset in radians (2*pi = 0)
"""
x, y = theta * np.cos(theta + theta_offset), theta * np.sin(
theta + theta_offset)
x_norm = np.max(np.abs(x))
y_norm = np.max(np.abs(y))
x, y = x / x_norm, y / y_norm
return x, y
def _bernoulli_spiral(theta, theta_offset=0., *args, **kwargs):
"""Return Equiangular (Bernoulli's) spiral
Args:
theta: array-like, angles from polar coordinates to be converted
theta_offset: float, angle offset in radians (2*pi = 0)
Kwargs:
exp_scale: growth rate of the exponential
"""
exp_scale = kwargs.pop('exp_scale', 0.1)
x, y = np.exp(exp_scale * theta) * np.cos(theta + theta_offset), np.exp(
exp_scale * theta) * np.sin(theta + theta_offset)
x_norm = np.max(np.abs(x))
y_norm = np.max(np.abs(y))
x, y = x / x_norm, y / y_norm
return x, y
def _fermat_spiral(theta, theta_offset=0., *args, **kwargs):
"""Return Parabolic (Fermat's) spiral
Args:
theta: array-like, angles from polar coordinates to be converted
theta_offset: float, angle offset in radians (2*pi = 0)
"""
x, y = np.sqrt(theta) * np.cos(theta + theta_offset), np.sqrt(theta) * np.sin(
theta + theta_offset)
x_norm = np.max(np.abs(x))
y_norm = np.max(np.abs(y))
x, y = x / x_norm, y / y_norm
return x, y
| apache-2.0 |
CharellKing/gobang | human_role.py | 1 | 9940 | #!/usr/bin/python
#-*-coding:utf-8-*-
import os
import sys
from threading import Thread
import socket, select
from module_msg import ModuleMsg
from gobang import Gobang, Stone
class HumanRole(object):
    def __init__(self, human_in, human_out, human_interface_in, human_interface_out):
        """Wire up the pipes linking this player to the opponent and to the UI."""
        self.inputs = []
        self.fin= human_in  # pipe: messages arriving from the opponent
        self.out = human_out  # pipe: messages sent to the opponent
        self.interface_in = human_interface_in  # pipe: messages arriving from the UI
        self.interface_out = human_interface_out  # pipe: messages sent to the UI
        self.timeout = None
        self.work = None
        self.thread_is_exit = True
        self.color = None  # our stone color, assigned when a game starts
        self.status = None  # "GO" when it is our turn, "WAIT" otherwise, None when idle
        self.time = Gobang.RELAY_TIME  # per-move countdown, reset on every move
        self.gobang = None  # current board state (a Gobang instance)
        self.is_start = False  # True while a game is in progress
    # Is a game currently in progress?
def is_starting(self):
return None != self.status and True == self.is_start
    # Send the opponent their color via a pipe message
    def send_color_msg(self):
        """Randomly assign colors for a new game and send the opponent theirs.

        The opponent gets the opposite of our color; whoever holds WHITE
        moves first ("GO"), the other side waits.
        """
        ModuleMsg(ModuleMsg.PROMT_LOG_MSG_TYPE, ["human send color msg"]).send(self.interface_out)
        self.time = Gobang.RELAY_TIME
        self.color = Gobang.random_order()
        self.gobang = Gobang()
        ModuleMsg(ModuleMsg.COLOR_MSG_TYPE, [not self.color]).send(self.out)
        if self.color == Stone.WHITE:
            self.status = "GO"
        else:
            self.status = "WAIT"
    # Receive the color message the opponent sent us
    def recv_color_msg(self, msg):
        """Adopt the color assigned by the opponent and enter the game."""
        ModuleMsg(ModuleMsg.PROMT_LOG_MSG_TYPE, ["human recv color msg"]).send(self.interface_out)
        color = msg.content[0]
        self.is_start = True
        self.color = color
        # WHITE moves first.
        if Stone.WHITE == color:
            self.status = "GO"
        else:
            self.status = "WAIT"
#发送开始游戏的消息
def send_start_msg(self, msg):
ModuleMsg(ModuleMsg.PROMT_LOG_MSG_TYPE, ["human send start msg"]).send(self.interface_out)
self.gobang = Gobang()
self.is_start = True
msg.send(self.out)
#接收对方开始的消息
def recv_start_msg(self, msg):
msg.send(self.interface_out)
# ModuleMsg(ModuleMsg.PROMT_LOG_MSG_TYPE, ["human recv start_msg"]).send(self.interface_out)
if True == self.is_start:
self.send_color_msg()
#发送停止的消息
def send_stop_msg(self, msg):
self.is_start = False
self.status = None
self.time = Gobang.RELAY_TIME
(self_ret, x_grid, y_grid, color) = msg.content
if Gobang.UNKNOWN == self_ret:
competitor_ret = Gobang.UNKNOWN
else:
competitor_ret = -self_ret
ModuleMsg(ModuleMsg.STOP_MSG_TYPE, [competitor_ret, x_grid, y_grid, color]).send(self.out)
ModuleMsg(ModuleMsg.STOP_MSG_TYPE, [self_ret, x_grid, y_grid, color]).send(self.interface_out)
# 接收对方停止游戏的消息
def recv_stop_msg(self, msg):
(ret, x_grid, y_grid, color) = msg.content
if None != x_grid and None != y_grid:
self.gobang.put_stone(x_grid, y_grid, color)
msg.send(self.interface_out)
self.is_start = False
self.status = None
self.time = Gobang.RELAY_TIME
#发送下子的消息
def send_putdown_msg(self, msg):
(x_grid, y_grid, color) = msg.content
self.time = Gobang.RELAY_TIME
self.status = "WAIT"
self.gobang.put_stone(x_grid, y_grid, color)
if Gobang.UNKNOWN == self.justy_result(x_grid, y_grid):
msg.send(self.out)
msg.send(self.interface_out)
#判断棋局是否出现结果
def justy_result(self, x_grid, y_grid):
self_ret = Gobang.UNKNOWN
if True == self.gobang.is_tie(x_grid, y_grid):
self_ret = Gobang.TIED
if True == self.gobang.is_five(x_grid, y_grid):
self_ret = Gobang.SUCCESS
if Gobang.UNKNOWN != self_ret:
msg = ModuleMsg(ModuleMsg.STOP_MSG_TYPE, [self_ret, x_grid, y_grid, self.color])
self.send_stop_msg(msg)
return self_ret
# 接收对方落子的消息
def recv_putdown_msg(self, msg):
(x_grid, y_grid, color) = msg.content
self.gobang.put_stone(x_grid, y_grid, color)
msg.send(self.interface_out)
self.status = "GO"
self.time = Gobang.RELAY_TIME
# 发送计时
def send_time_msg(self):
if self.time > 0:
self.time -= 1
msg = ModuleMsg(ModuleMsg.TIME_MSG_TYPE, [self.time])
msg.send(self.interface_out)
msg.send(self.out)
else:
self.status = "WAIT"
self.time = Gobang.RELAY_TIME
(x_grid, y_grid) = self.gobang.random_stone(self.color)
if Gobang.UNKNOWN == self.justy_result(x_grid, y_grid):
msg = ModuleMsg(ModuleMsg.PUT_MSG_TYPE, [x_grid, y_grid, self.color])
msg.send(self.interface_out)
msg.send(self.out)
#接收计时
def recv_time_msg(self, msg):
time = msg.content[0]
msg.send(self.interface_out)
self.time = time
#发送线程结束的消息
def send_thread_exit_msg(self, msg):
self.thread_is_exit = True
msg.send(self.out)
msg.send(self.interface_out)
self.time = Gobang.RELAY_TIME
self.is_start = False
self.color = None
#接收对方线程结束的消息
def recv_thread_exit_msg(self, msg):
self.thread_is_exit = True
self.is_start = False
msg.send(self.interface_out)
self.time = Gobang.RELAY_TIME
self.color = None
#发送游戏退出的消息
def send_exit_msg(self, msg):
msg.send(self.out)
msg.send(self.interface_out)
self.thread_is_exit = True
self.is_start = False
# 接收对方游戏退出的消息
def recv_exit_msg(self, msg):
ModuleMsg(ModuleMsg.EXIT_MSG_TYPE, [msg.content[0]]).send(self.interface_out)
self.thread_is_exit = True
self.is_start = False
#发送监听的消息给cmd_controller或者gui_controller
def send_listen_msg(self, msg):
msg.send(self.out)
#发送连接消息给cmd_controller或者gui_controller
def send_conn_msg(self, msg):
msg.send(self.out)
#处理接收来自己对方的消息
def recv_msg(self, msg):
if msg.msg_type == ModuleMsg.START_MSG_TYPE:
self.recv_start_msg(msg)
elif msg.msg_type == ModuleMsg.COLOR_MSG_TYPE:
self.recv_color_msg(msg)
elif msg.msg_type == ModuleMsg.PUT_MSG_TYPE:
self.recv_putdown_msg(msg)
elif msg.msg_type == ModuleMsg.TIME_MSG_TYPE:
self.recv_time_msg(msg)
elif msg.msg_type == ModuleMsg.THREAD_EXIT_MSG_TYPE:
self.recv_thread_exit_msg(msg)
elif msg.msg_type == ModuleMsg.STOP_MSG_TYPE:
self.recv_stop_msg(msg)
elif msg.msg_type == ModuleMsg.EXIT_MSG_TYPE:
self.recv_exit_msg()
elif msg.msg_type == ModuleMsg.LISTEN_ERR_MSG_TYPE or \
msg.msg_type == ModuleMsg.LISTEN_SUCC_MSG_TYPE or \
msg.msg_type == ModuleMsg.CONNECT_SUCC_MSG_TYPE or \
msg.msg_type == ModuleMsg.SRV_RECV_CONN_MSG_TYPE or \
msg.msg_type == ModuleMsg.CONNECT_ERR_MSG_TYPE:
msg.send(self.interface_out)
else:
ModuleMsg(ModuleMsg.PROMT_LOG_MSG_TYPE, ["[%d]无效的消息" %(msg.msg_type)]).send(self.interface_out)
#处理来自界面(cmd、gui)的消息,主要是将消息转化给对方
def recv_msg_from_interface(self, msg):
if msg.msg_type == ModuleMsg.LISTEN_MSG_TYPE:
self.send_listen_msg(msg)
elif msg.msg_type == ModuleMsg.CONNECT_MSG_TYPE:
self.send_conn_msg(msg)
elif msg.msg_type == ModuleMsg.START_MSG_TYPE:
self.send_start_msg(msg)
elif msg.msg_type == ModuleMsg.PUT_MSG_TYPE:
self.send_putdown_msg(msg)
elif msg.msg_type == ModuleMsg.STOP_MSG_TYPE:
self.send_stop_msg(msg)
elif msg.msg_type == ModuleMsg.THREAD_EXIT_MSG_TYPE:
self.send_thread_exit_msg(msg)
elif msg.msg_type == ModuleMsg.EXIT_MSG_TYPE:
self.send_exit_msg(msg)
else:
ModuleMsg(ModuleMsg.PROMT_LOG_MSG_TYPE, ["[%d]无效的消息" %(msg.msg_type)]).send(self.interface_out)
# human_role的线程
def work_thread(self):
self.inputs = [self.fin, self.interface_in]
outputs = []
timeout = 1
self.thread_is_exit = False
while False == self.thread_is_exit:
readable, writable, exceptional = select.select(self.inputs, outputs, self.inputs, timeout)
if readable or writable or exceptional:
for fd in readable:
if fd is self.fin:
msg_strs = os.read(fd, ModuleMsg.MAX_MSG_LEN).split('\n')
for msg_str in msg_strs:
if "" != msg_str:
msg = ModuleMsg().decode(msg_str)
self.recv_msg(msg)
elif fd is self.interface_in:
msg_strs = os.read(fd, ModuleMsg.MAX_MSG_LEN).split('\n')
for msg_str in msg_strs:
if "" != msg_str:
msg = ModuleMsg().decode(msg_str)
self.recv_msg_from_interface(msg)
elif "GO" == self.status and False == self.thread_is_exit:
self.send_time_msg()
self.inputs.remove(self.fin)
self.inputs.remove(self.interface_in)
os.close(self.fin)
os.close(self.interface_in)
os.close(self.out)
os.close(self.interface_out)
#开启线程
def start(self):
self.work = Thread(target = self.work_thread)
self.work.start()
| apache-2.0 |
viblo/pymunk | pymunk/shape_filter.py | 1 | 4464 | from typing import NamedTuple
class ShapeFilter(NamedTuple):
    """Fast collision filtering via groups and category/mask bitfields.

    Shapes that share the same non-zero ``group`` never collide with each
    other; this is typically used to stop the parts of a jointed composite
    object (e.g. a ragdoll) from colliding with themselves.

    Independently, every shape belongs to a set of ``categories`` and
    declares a ``mask`` of categories it is willing to collide with.  Both
    are 32-bit unsigned bitmasks (``0x00000000``-``0xFFFFFFFF``), and a
    collision is only considered when the category/mask combinations of
    *both* shapes agree.  For example, with a player category ``0b1``, a
    shape created with ``ShapeFilter(categories=0b1)`` can be excluded from
    a space query by passing
    ``ShapeFilter(mask=ShapeFilter.ALL_MASKS() ^ 0b1)``.

    By default a shape is in every category and collides with every
    category, and a shape may belong to several categories at once.  For
    filtering needs beyond groups and masks, collision handlers can be used
    instead; they are more flexible, but slower, since group/mask rejection
    happens before the expensive collision detection code runs.
    """

    group: int = 0
    """Two objects with the same non-zero group value do not collide.

    Generally used to group the shapes of a composite object together to
    disable self collisions.
    """

    categories: int = 0xFFFFFFFF
    """A bitmask of user-definable categories this object belongs to.

    The category/mask combinations of both objects in a collision must
    agree for a collision to occur.
    """

    mask: int = 0xFFFFFFFF
    """A bitmask of user-definable categories this object collides with.

    The category/mask combinations of both objects in a collision must
    agree for a collision to occur.
    """

    @staticmethod
    def ALL_MASKS() -> int:
        # All 32 bits set: collide with every category.
        return (1 << 32) - 1

    @staticmethod
    def ALL_CATEGORIES() -> int:
        # All 32 bits set: belong to every category.
        return (1 << 32) - 1
| mit |
jiangzhuo/kbengine | kbe/src/lib/python/Lib/test/test_enumerate.py | 72 | 8072 | import unittest
import operator
import sys
import pickle
from test import support
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
3 // 0
class N:
'Iterator missing __next__()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class PickleTest:
    # Helper to check picklability
    def check_pickle(self, itorg, seq):
        """Round-trip *itorg* through pickle and verify it still yields *seq*.

        Three properties are checked: a fresh unpickle yields the full
        sequence; an unpickled copy can be advanced; and re-pickling a
        partially consumed iterator preserves its position.
        """
        d = pickle.dumps(itorg)
        it = pickle.loads(d)
        self.assertEqual(type(itorg), type(it))
        self.assertEqual(list(it), seq)

        # Unpickle a second copy and advance it by one item.
        it = pickle.loads(d)
        try:
            next(it)
        except StopIteration:
            # Empty input: nothing beyond the first element to compare.
            self.assertFalse(seq[1:])
            return
        # Pickling a partially consumed iterator must keep its position.
        d = pickle.dumps(it)
        it = pickle.loads(d)
        self.assertEqual(list(it), seq[1:])
class EnumerateTestCase(unittest.TestCase, PickleTest):
    """Exercise enumerate() against each iteration-protocol variant above."""

    enum = enumerate
    seq, res = 'abc', [(0,'a'), (1,'b'), (2,'c')]

    def test_basicfunction(self):
        self.assertEqual(type(self.enum(self.seq)), self.enum)
        e = self.enum(self.seq)
        self.assertEqual(iter(e), e)
        self.assertEqual(list(self.enum(self.seq)), self.res)
        # Accessing __doc__ must not raise (sanity check only).
        self.enum.__doc__

    def test_pickle(self):
        self.check_pickle(self.enum(self.seq), self.res)

    def test_getitemseqn(self):
        # Legacy sequence protocol (__getitem__ only).
        self.assertEqual(list(self.enum(G(self.seq))), self.res)
        e = self.enum(G(''))
        self.assertRaises(StopIteration, next, e)

    def test_iteratorseqn(self):
        self.assertEqual(list(self.enum(I(self.seq))), self.res)
        e = self.enum(I(''))
        self.assertRaises(StopIteration, next, e)

    def test_iteratorgenerator(self):
        self.assertEqual(list(self.enum(Ig(self.seq))), self.res)
        e = self.enum(Ig(''))
        self.assertRaises(StopIteration, next, e)

    def test_noniterable(self):
        # X supplies __next__ but no __iter__/__getitem__: not iterable.
        self.assertRaises(TypeError, self.enum, X(self.seq))

    def test_illformediterable(self):
        # N supplies __iter__ but no __next__: ill-formed iterator.
        self.assertRaises(TypeError, self.enum, N(self.seq))

    def test_exception_propagation(self):
        self.assertRaises(ZeroDivisionError, list, self.enum(E(self.seq)))

    def test_argumentcheck(self):
        self.assertRaises(TypeError, self.enum) # no arguments
        self.assertRaises(TypeError, self.enum, 1) # wrong type (not iterable)
        self.assertRaises(TypeError, self.enum, 'abc', 'a') # wrong type
        self.assertRaises(TypeError, self.enum, 'abc', 2, 3) # too many arguments

    @support.cpython_only
    def test_tuple_reuse(self):
        # Tests an implementation detail where tuple is reused
        # whenever nothing else holds a reference to it
        self.assertEqual(len(set(map(id, list(enumerate(self.seq))))), len(self.seq))
        self.assertEqual(len(set(map(id, enumerate(self.seq)))), min(1,len(self.seq)))
class MyEnum(enumerate):
    # Subclass used to verify enumerate behaves correctly when subclassed.
    pass

class SubclassTestCase(EnumerateTestCase):
    # Re-run the whole suite against the subclass.
    enum = MyEnum

class TestEmpty(EnumerateTestCase):
    # Edge case: empty input sequence.
    seq, res = '', []

class TestBig(EnumerateTestCase):
    # Large input to exercise iteration well past small-object caches.
    seq = range(10,20000,2)
    res = list(zip(range(20000), seq))
class TestReversed(unittest.TestCase, PickleTest):
    """Behavioral tests for the reversed() builtin."""

    def test_simple(self):
        class A:
            def __getitem__(self, i):
                if i < 5:
                    return str(i)
                raise StopIteration
            def __len__(self):
                return 5
        for data in 'abc', range(5), tuple(enumerate('abc')), A(), range(1,17,5):
            self.assertEqual(list(data)[::-1], list(reversed(data)))
        self.assertRaises(TypeError, reversed, {})
        # don't allow keyword arguments
        self.assertRaises(TypeError, reversed, [], a=1)

    def test_range_optimization(self):
        # reversed(range) uses the same specialized iterator type as iter(range).
        x = range(1)
        self.assertEqual(type(reversed(x)), type(iter(x)))

    def test_len(self):
        for s in ('hello', tuple('hello'), list('hello'), range(5)):
            self.assertEqual(operator.length_hint(reversed(s)), len(s))
            r = reversed(s)
            list(r)
            # An exhausted iterator reports a hint of zero.
            self.assertEqual(operator.length_hint(r), 0)
        class SeqWithWeirdLen:
            called = False
            def __len__(self):
                if not self.called:
                    self.called = True
                    return 10
                raise ZeroDivisionError
            def __getitem__(self, index):
                return index
        # An exception raised by a later __len__ call must propagate.
        r = reversed(SeqWithWeirdLen())
        self.assertRaises(ZeroDivisionError, operator.length_hint, r)

    def test_gc(self):
        class Seq:
            def __len__(self):
                return 10
            def __getitem__(self, index):
                return index
        s = Seq()
        r = reversed(s)
        # Create a reference cycle; must not crash the garbage collector.
        s.r = r

    def test_args(self):
        self.assertRaises(TypeError, reversed)
        self.assertRaises(TypeError, reversed, [], 'extra')

    @unittest.skipUnless(hasattr(sys, 'getrefcount'), 'test needs sys.getrefcount()')
    def test_bug1229429(self):
        # this bug was never in reversed, it was in
        # PyObject_CallMethod, and reversed_new calls that sometimes.
        def f():
            pass
        r = f.__reversed__ = object()
        rc = sys.getrefcount(r)
        for i in range(10):
            try:
                reversed(f)
            except TypeError:
                pass
            else:
                self.fail("non-callable __reversed__ didn't raise!")
        # The refcount must be unchanged, i.e. no reference was leaked.
        self.assertEqual(rc, sys.getrefcount(r))

    def test_objmethods(self):
        # Objects must have __len__() and __getitem__() implemented.
        class NoLen(object):
            def __getitem__(self): return 1
        nl = NoLen()
        self.assertRaises(TypeError, reversed, nl)
        class NoGetItem(object):
            def __len__(self): return 2
        ngi = NoGetItem()
        self.assertRaises(TypeError, reversed, ngi)

    def test_pickle(self):
        for data in 'abc', range(5), tuple(enumerate('abc')), range(1,17,5):
            self.check_pickle(reversed(data), list(data)[::-1])
class EnumerateStartTestCase(EnumerateTestCase):
    # Base for start-offset variants: subclasses bind self.enum to
    # enumerate with a start value, so only basic behavior is re-checked.

    def test_basicfunction(self):
        e = self.enum(self.seq)
        self.assertEqual(iter(e), e)
        self.assertEqual(list(self.enum(self.seq)), self.res)

class TestStart(EnumerateStartTestCase):
    # Small positive start value.
    enum = lambda self, i: enumerate(i, start=11)
    seq, res = 'abc', [(11, 'a'), (12, 'b'), (13, 'c')]

class TestLongStart(EnumerateStartTestCase):
    # Start beyond sys.maxsize exercises the arbitrary-precision count path.
    enum = lambda self, i: enumerate(i, start=sys.maxsize+1)
    seq, res = 'abc', [(sys.maxsize+1,'a'), (sys.maxsize+2,'b'),
                       (sys.maxsize+3,'c')]
def test_main(verbose=None):
    """Run the suite; on a refcount-tracking build, repeat to detect leaks."""
    support.run_unittest(__name__)

    # verify reference counting
    if verbose and hasattr(sys, "gettotalrefcount"):
        counts = [None] * 5
        for i in range(len(counts)):
            support.run_unittest(__name__)
            counts[i] = sys.gettotalrefcount()
        print(counts)

if __name__ == "__main__":
    test_main(verbose=True)
betoesquivel/CIE | flask/lib/python2.7/site-packages/sqlalchemy/event/base.py | 33 | 7248 | # event/base.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base implementation classes.
The public-facing ``Events`` serves as the base class for an event interface;
its public attributes represent different kinds of events. These attributes
are mirrored onto a ``_Dispatch`` class, which serves as a container for
collections of listener functions. These collections are represented both
at the class level of a particular ``_Dispatch`` class as well as within
instances of ``_Dispatch``.
"""
from __future__ import absolute_import
from .. import util
from .attr import _JoinedDispatchDescriptor, \
_EmptyListener, _DispatchDescriptor
# Maps each event-method name to the list of Events classes that define it.
_registrars = util.defaultdict(list)


def _is_event_name(name):
    # Public attributes other than 'dispatch' are treated as event names.
    return not name.startswith('_') and name != 'dispatch'


class _UnpickleDispatch(object):
    """Serializable callable that re-generates an instance of
    :class:`_Dispatch` given a particular :class:`.Events` subclass.

    """
    def __call__(self, _parent_cls):
        # Walk the MRO to find the class that actually owns the 'dispatch'
        # descriptor, then instantiate its associated dispatch class.
        for cls in _parent_cls.__mro__:
            if 'dispatch' in cls.__dict__:
                return cls.__dict__['dispatch'].dispatch_cls(_parent_cls)
        else:
            raise AttributeError("No class with a 'dispatch' member present.")
class _Dispatch(object):
    """Mirror the event listening definitions of an Events class with
    listener collections.

    Classes which define a "dispatch" member will return a
    non-instantiated :class:`._Dispatch` subclass when the member
    is accessed at the class level.  When the "dispatch" member is
    accessed at the instance level of its owner, an instance
    of the :class:`._Dispatch` class is returned.

    A :class:`._Dispatch` class is generated for each :class:`.Events`
    class defined, by the :func:`._create_dispatcher_class` function.
    The original :class:`.Events` classes remain untouched.
    This decouples the construction of :class:`.Events` subclasses from
    the implementation used by the event internals, and allows
    inspecting tools like Sphinx to work in an unsurprising
    way against the public API.

    """

    _events = None
    """reference the :class:`.Events` class which this
    :class:`._Dispatch` is created for."""

    def __init__(self, _parent_cls):
        # The owner class whose instances this dispatch collection serves.
        self._parent_cls = _parent_cls

    @util.classproperty
    def _listen(cls):
        # Listener registration is delegated to the owning Events class.
        return cls._events._listen

    def _join(self, other):
        """Create a 'join' of this :class:`._Dispatch` and another.

        This new dispatcher will dispatch events to both
        :class:`._Dispatch` objects.

        """
        if '_joined_dispatch_cls' not in self.__class__.__dict__:
            # Lazily build (and cache on this class) a joined dispatcher
            # type mirroring every event descriptor of this dispatch class.
            cls = type(
                "Joined%s" % self.__class__.__name__,
                (_JoinedDispatcher, self.__class__), {}
            )
            for ls in _event_descriptors(self):
                setattr(cls, ls.name, _JoinedDispatchDescriptor(ls.name))

            self.__class__._joined_dispatch_cls = cls
        return self._joined_dispatch_cls(self, other)

    def __reduce__(self):
        # Pickle as a callable that rebuilds the dispatch from its owner class.
        return _UnpickleDispatch(), (self._parent_cls, )

    def _update(self, other, only_propagate=True):
        """Populate from the listeners in another :class:`_Dispatch`
            object."""
        for ls in _event_descriptors(other):
            if isinstance(ls, _EmptyListener):
                # Class-level placeholder with no listeners; nothing to copy.
                continue
            getattr(self, ls.name).\
                for_modify(self)._update(ls, only_propagate=only_propagate)

    @util.hybridmethod
    def _clear(self):
        # Remove all listeners from every event collection on this dispatch.
        for attr in dir(self):
            if _is_event_name(attr):
                getattr(self, attr).for_modify(self).clear()
def _event_descriptors(target):
    # All attributes of *target* whose names qualify as event names.
    return [getattr(target, k) for k in dir(target) if _is_event_name(k)]


class _EventMeta(type):
    """Intercept new Event subclasses and create
    associated _Dispatch classes."""

    def __init__(cls, classname, bases, dict_):
        # Build the companion _Dispatch class before normal type init.
        _create_dispatcher_class(cls, classname, bases, dict_)
        return type.__init__(cls, classname, bases, dict_)
def _create_dispatcher_class(cls, classname, bases, dict_):
    """Create a :class:`._Dispatch` class corresponding to an
    :class:`.Events` class."""

    # there's all kinds of ways to do this,
    # i.e. make a Dispatch class that shares the '_listen' method
    # of the Event class, this is the straight monkeypatch.
    dispatch_base = getattr(cls, 'dispatch', _Dispatch)
    dispatch_cls = type("%sDispatch" % classname,
                        (dispatch_base, ), {})
    cls._set_dispatch(cls, dispatch_cls)

    # Mirror each event method onto the dispatch class and record the
    # Events class in the module-level registrar index.
    for k in dict_:
        if _is_event_name(k):
            setattr(dispatch_cls, k, _DispatchDescriptor(cls, dict_[k]))
            _registrars[k].append(cls)

    # Attach a dispatcher descriptor to the declared target type, if any.
    if getattr(cls, '_dispatch_target', None):
        cls._dispatch_target.dispatch = dispatcher(cls)


def _remove_dispatcher(cls):
    """Undo the registrar bookkeeping done by _create_dispatcher_class."""
    for k in dir(cls):
        if _is_event_name(k):
            _registrars[k].remove(cls)
            if not _registrars[k]:
                del _registrars[k]
class Events(util.with_metaclass(_EventMeta, object)):
    """Define event listening functions for a particular target type."""

    @staticmethod
    def _set_dispatch(cls, dispatch_cls):
        # this allows an Events subclass to define additional utility
        # methods made available to the target via
        # "self.dispatch._events.<utilitymethod>"
        # @staticemethod to allow easy "super" calls while in a metaclass
        # constructor.
        cls.dispatch = dispatch_cls
        dispatch_cls._events = cls

    @classmethod
    def _accept_with(cls, target):
        # Mapper, ClassManager, Session override this to
        # also accept classes, scoped_sessions, sessionmakers, etc.
        if hasattr(target, 'dispatch') and (
                isinstance(target.dispatch, cls.dispatch) or
                isinstance(target.dispatch, type) and
                issubclass(target.dispatch, cls.dispatch)
        ):
            return target
        else:
            return None

    @classmethod
    def _listen(cls, event_key, propagate=False, insert=False, named=False):
        # Registration is delegated to the event key bound to this class.
        event_key.base_listen(propagate=propagate, insert=insert, named=named)

    @classmethod
    def _remove(cls, event_key):
        event_key.remove()

    @classmethod
    def _clear(cls):
        # Drop every listener registered against this class's dispatch.
        cls.dispatch._clear()
class _JoinedDispatcher(object):
    """Represent a connection between two _Dispatch objects."""

    def __init__(self, local, parent):
        self.local = local
        self.parent = parent
        self._parent_cls = local._parent_cls


class dispatcher(object):
    """Descriptor used by target classes to
    deliver the _Dispatch class at the class level
    and produce new _Dispatch instances for target
    instances.

    """
    def __init__(self, events):
        self.dispatch_cls = events.dispatch
        self.events = events

    def __get__(self, obj, cls):
        if obj is None:
            # Class-level access returns the dispatch class itself.
            return self.dispatch_cls
        # Instance-level access creates a dispatch instance and caches it in
        # the instance __dict__, bypassing this descriptor on later lookups.
        obj.__dict__['dispatch'] = disp = self.dispatch_cls(cls)
        return disp
| mit |
stevielu/viewfinder | backend/www/admin/admin.py | 13 | 3707 | # Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Handlers for viewfinder web application administration.
AdminHandler: top-level admin handler
"""
__author__ = 'spencer@emailscrubbed.com (Spencer Kimball)'
import httplib
import logging
import os
import traceback
from tornado import gen, web
from viewfinder.backend.base import counters, handler
from viewfinder.backend.db.db_client import DBClient
from viewfinder.backend.db import schema, vf_schema
from viewfinder.backend.db.admin_permissions import AdminPermissions
from viewfinder.backend.www import basic_auth
# Rate counter sampled for the admin monitoring dashboard.
_req_per_sec = counters.define_rate('viewfinder.admin.www.requests_per_second',
                                    'Administrator website requests handled per second.')


def require_permission(level=None):
    """Decorator to be used in admin get/post methods.

    Permission required may be 'root', 'support', or None.
    If None is specified, the user must still be in the AdminPermissions table.
    Permissions are stored in self._permissions for later access.
    """
    def decorator(f):
        @gen.engine
        def wrapper(self, *args, **kwargs):
            assert level in [None, 'root', 'support']
            # Always resolve the caller's permissions first; this raises
            # FORBIDDEN if the user has no AdminPermissions entry at all.
            self._permissions = yield gen.Task(self.QueryAdminPermissions)
            if level == 'root':
                self.CheckIsRoot()
            elif level == 'support':
                self.CheckIsSupport()
            f(self, *args, **kwargs)
        return wrapper
    return decorator
class AdminHandler(basic_auth.BasicAuthHandler):
    """Directory of administration tasks."""

    def prepare(self):
        # Record the authenticated user and bump the request-rate counter
        # before the request method runs.
        basic_auth.BasicAuthHandler.prepare(self)
        self._auth_credentials = self.get_current_user()
        _req_per_sec.increment()

    @handler.authenticated()
    @handler.asynchronous(datastore=True)
    # We only require that the user exists. Actual rights are only used here to build the link table.
    # They will be checked by each sub page.
    @require_permission()
    def get(self):
        # Render the admin directory with flags describing the user's rights.
        t_dict = self.PermissionsTemplateDict()
        self.render('admin.html', **t_dict)

    def CheckIsRoot(self):
        """Check whether the permissions object has a ROOT rights entry."""
        if not self._permissions.IsRoot():
            raise web.HTTPError(httplib.FORBIDDEN, 'User %s does not have root credentials.' % self._auth_credentials)

    def CheckIsSupport(self):
        """Check whether the permissions object has a SUPPORT rights entry. Root users do not automatically get
        granted support rights.
        """
        if not self._permissions.IsSupport():
            raise web.HTTPError(httplib.FORBIDDEN, 'User %s does not have support credentials.' % self._auth_credentials)

    def PermissionsTemplateDict(self):
        """Dict of variables used in all admin templates."""
        return { 'auth_credentials': self._auth_credentials,
                 'is_root': self._permissions.IsRoot(),
                 'is_support': self._permissions.IsSupport() }

    @gen.engine
    def QueryAdminPermissions(self, callback):
        """Get set of permissions for user. Raise an error if the user does not have an entry,
        or if the set of rights is empty.
        """
        permissions = yield gen.Task(AdminPermissions.Query, self._client, self._auth_credentials, None, must_exist=False)
        if permissions is None or not permissions.rights:
            raise web.HTTPError(httplib.FORBIDDEN, 'User %s has no credentials.' % self._auth_credentials)
        callback(permissions)

    def _handle_request_exception(self, value):
        """Handles presentation of an exception condition to the admin.
        """
        # Render a friendly error page with the traceback; returning True
        # tells Tornado the exception has been fully handled.
        logging.exception('error in admin page')
        self.render('admin_error.html',
                    auth_credentials=self._auth_credentials, is_root=False, is_support=False,
                    title=value, message=traceback.format_exc())
        return True
| apache-2.0 |
savoirfairelinux/django | django/core/management/commands/makemigrations.py | 18 | 14261 | import os
import sys
from itertools import takewhile
from django.apps import apps
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS, connections, router
from django.db.migrations import Migration
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.questioner import (
InteractiveMigrationQuestioner, MigrationQuestioner,
NonInteractiveMigrationQuestioner,
)
from django.db.migrations.state import ProjectState
from django.db.migrations.utils import get_migration_name_timestamp
from django.db.migrations.writer import MigrationWriter
class Command(BaseCommand):
help = "Creates new migration(s) for apps."
    def add_arguments(self, parser):
        """Register makemigrations' command-line options on *parser*."""
        parser.add_argument(
            'args', metavar='app_label', nargs='*',
            help='Specify the app label(s) to create migrations for.',
        )
        parser.add_argument(
            '--dry-run', action='store_true', dest='dry_run',
            help="Just show what migrations would be made; don't actually write them.",
        )
        parser.add_argument(
            '--merge', action='store_true', dest='merge',
            help="Enable fixing of migration conflicts.",
        )
        parser.add_argument(
            '--empty', action='store_true', dest='empty',
            help="Create an empty migration.",
        )
        parser.add_argument(
            '--noinput', '--no-input', action='store_false', dest='interactive',
            help='Tells Django to NOT prompt the user for input of any kind.',
        )
        parser.add_argument(
            '-n', '--name', action='store', dest='name', default=None,
            help="Use this name for migration file(s).",
        )
        parser.add_argument(
            '--check', action='store_true', dest='check_changes',
            help='Exit with a non-zero status if model changes are missing migrations.',
        )
    def handle(self, *app_labels, **options):
        """Detect model changes and write (or merge) migration files."""
        self.verbosity = options['verbosity']
        self.interactive = options['interactive']
        self.dry_run = options['dry_run']
        self.merge = options['merge']
        self.empty = options['empty']
        self.migration_name = options['name']
        check_changes = options['check_changes']

        # Make sure the app they asked for exists
        app_labels = set(app_labels)
        bad_app_labels = set()
        for app_label in app_labels:
            try:
                apps.get_app_config(app_label)
            except LookupError:
                bad_app_labels.add(app_label)
        if bad_app_labels:
            for app_label in bad_app_labels:
                self.stderr.write("App '%s' could not be found. Is it in INSTALLED_APPS?" % app_label)
            sys.exit(2)

        # Load the current graph state. Pass in None for the connection so
        # the loader doesn't try to resolve replaced migrations from DB.
        loader = MigrationLoader(None, ignore_no_migrations=True)

        # Raise an error if any migrations are applied before their dependencies.
        consistency_check_labels = {config.label for config in apps.get_app_configs()}
        # Non-default databases are only checked if database routers used.
        aliases_to_check = connections if settings.DATABASE_ROUTERS else [DEFAULT_DB_ALIAS]
        for alias in sorted(aliases_to_check):
            connection = connections[alias]
            if (connection.settings_dict['ENGINE'] != 'django.db.backends.dummy' and any(
                    # At least one model must be migrated to the database.
                    router.allow_migrate(connection.alias, app_label, model_name=model._meta.object_name)
                    for app_label in consistency_check_labels
                    for model in apps.get_app_config(app_label).get_models()
            )):
                loader.check_consistent_history(connection)

        # Before anything else, see if there's conflicting apps and drop out
        # hard if there are any and they don't want to merge
        conflicts = loader.detect_conflicts()

        # If app_labels is specified, filter out conflicting migrations for unspecified apps
        if app_labels:
            conflicts = {
                app_label: conflict for app_label, conflict in conflicts.items()
                if app_label in app_labels
            }

        if conflicts and not self.merge:
            name_str = "; ".join(
                "%s in %s" % (", ".join(names), app)
                for app, names in conflicts.items()
            )
            raise CommandError(
                "Conflicting migrations detected; multiple leaf nodes in the "
                "migration graph: (%s).\nTo fix them run "
                "'python manage.py makemigrations --merge'" % name_str
            )

        # If they want to merge and there's nothing to merge, then politely exit
        if self.merge and not conflicts:
            self.stdout.write("No conflicts detected to merge.")
            return

        # If they want to merge and there is something to merge, then
        # divert into the merge code
        if self.merge and conflicts:
            return self.handle_merge(loader, conflicts)

        if self.interactive:
            questioner = InteractiveMigrationQuestioner(specified_apps=app_labels, dry_run=self.dry_run)
        else:
            questioner = NonInteractiveMigrationQuestioner(specified_apps=app_labels, dry_run=self.dry_run)
        # Set up autodetector
        autodetector = MigrationAutodetector(
            loader.project_state(),
            ProjectState.from_apps(apps),
            questioner,
        )

        # If they want to make an empty migration, make one for each app
        if self.empty:
            if not app_labels:
                raise CommandError("You must supply at least one app label when using --empty.")
            # Make a fake changes() result we can pass to arrange_for_graph
            changes = {
                app: [Migration("custom", app)]
                for app in app_labels
            }
            changes = autodetector.arrange_for_graph(
                changes=changes,
                graph=loader.graph,
                migration_name=self.migration_name,
            )
            self.write_migration_files(changes)
            return

        # Detect changes
        changes = autodetector.changes(
            graph=loader.graph,
            trim_to_apps=app_labels or None,
            convert_apps=app_labels or None,
            migration_name=self.migration_name,
        )

        if not changes:
            # No changes? Tell them.
            if self.verbosity >= 1:
                if len(app_labels) == 1:
                    self.stdout.write("No changes detected in app '%s'" % app_labels.pop())
                elif len(app_labels) > 1:
                    self.stdout.write("No changes detected in apps '%s'" % ("', '".join(app_labels)))
                else:
                    self.stdout.write("No changes detected")
        else:
            self.write_migration_files(changes)
            if check_changes:
                # --check: report missing migrations through the exit status.
                sys.exit(1)
    def write_migration_files(self, changes):
        """
        Take a changes dict and write them out as migration files.
        """
        # Tracks which app labels already had their migrations package
        # (directory + __init__.py) created, so that filesystem setup runs
        # only once per app even when an app gets several migrations.
        directory_created = {}
        for app_label, app_migrations in changes.items():
            if self.verbosity >= 1:
                self.stdout.write(self.style.MIGRATE_HEADING("Migrations for '%s':" % app_label) + "\n")
            for migration in app_migrations:
                # Describe the migration
                writer = MigrationWriter(migration)
                if self.verbosity >= 1:
                    # Display a relative path if it's below the current working
                    # directory, or an absolute path otherwise.
                    try:
                        migration_string = os.path.relpath(writer.path)
                    except ValueError:
                        # relpath() can raise (e.g. paths on different Windows
                        # drives); fall back to the absolute path.
                        migration_string = writer.path
                    if migration_string.startswith('..'):
                        migration_string = writer.path
                    self.stdout.write("  %s\n" % (self.style.MIGRATE_LABEL(migration_string),))
                    for operation in migration.operations:
                        self.stdout.write("    - %s\n" % operation.describe())
                if not self.dry_run:
                    # Write the migrations file to the disk.
                    migrations_directory = os.path.dirname(writer.path)
                    if not directory_created.get(app_label):
                        if not os.path.isdir(migrations_directory):
                            os.mkdir(migrations_directory)
                        init_path = os.path.join(migrations_directory, "__init__.py")
                        if not os.path.isfile(init_path):
                            open(init_path, "w").close()
                        # We just do this once per app
                        directory_created[app_label] = True
                    migration_string = writer.as_string()
                    with open(writer.path, "w", encoding='utf-8') as fh:
                        fh.write(migration_string)
                elif self.verbosity == 3:
                    # Alternatively, makemigrations --dry-run --verbosity 3
                    # will output the migrations to stdout rather than saving
                    # the file to the disk.
                    self.stdout.write(self.style.MIGRATE_HEADING(
                        "Full migrations file '%s':" % writer.filename) + "\n"
                    )
                    self.stdout.write("%s\n" % writer.as_string())
    def handle_merge(self, loader, conflicts):
        """
        Handles merging together conflicted migrations interactively,
        if it's safe; otherwise, advises on how to fix it.

        ``conflicts`` maps app labels to the names of that app's conflicting
        leaf migrations (multiple leaves in the migration graph).
        """
        if self.interactive:
            questioner = InteractiveMigrationQuestioner()
        else:
            # Non-interactive runs auto-answer "yes" to the merge prompt.
            questioner = MigrationQuestioner(defaults={'ask_merge': True})
        for app_label, migration_names in conflicts.items():
            # Grab out the migrations in question, and work out their
            # common ancestor.
            merge_migrations = []
            for migration_name in migration_names:
                migration = loader.get_migration(app_label, migration_name)
                # ancestry: this app's migrations on the path from the graph
                # root up to (and including) this leaf, in forward order.
                migration.ancestry = [
                    mig for mig in loader.graph.forwards_plan((app_label, migration_name))
                    if mig[0] == migration.app_label
                ]
                merge_migrations.append(migration)

            def all_items_equal(seq):
                # True when every item of seq equals the first one
                # (vacuously true for empty/singleton sequences).
                return all(item == seq[0] for item in seq[1:])

            # Walk the ancestries in lockstep; the length of the shared
            # prefix (cut off by takewhile) is the common ancestor chain.
            merge_migrations_generations = zip(*(m.ancestry for m in merge_migrations))
            common_ancestor_count = sum(1 for common_ancestor_generation
                                        in takewhile(all_items_equal, merge_migrations_generations))
            if not common_ancestor_count:
                raise ValueError("Could not find common ancestor of %s" % migration_names)
            # Now work out the operations along each divergent branch
            for migration in merge_migrations:
                migration.branch = migration.ancestry[common_ancestor_count:]
                migrations_ops = (loader.get_migration(node_app, node_name).operations
                                  for node_app, node_name in migration.branch)
                # Flatten the per-node operation lists into one list.
                migration.merged_operations = sum(migrations_ops, [])
            # In future, this could use some of the Optimizer code
            # (can_optimize_through) to automatically see if they're
            # mergeable. For now, we always just prompt the user.
            if self.verbosity > 0:
                self.stdout.write(self.style.MIGRATE_HEADING("Merging %s" % app_label))
                for migration in merge_migrations:
                    self.stdout.write(self.style.MIGRATE_LABEL("  Branch %s" % migration.name))
                    for operation in migration.merged_operations:
                        self.stdout.write("    - %s\n" % operation.describe())
            if questioner.ask_merge(app_label):
                # If they still want to merge it, then write out an empty
                # file depending on the migrations needing merging.
                numbers = [
                    MigrationAutodetector.parse_number(migration.name)
                    for migration in merge_migrations
                ]
                try:
                    biggest_number = max(x for x in numbers if x is not None)
                except ValueError:
                    # No numeric prefixes at all; start counting from 1.
                    biggest_number = 1
                subclass = type("Migration", (Migration, ), {
                    "dependencies": [(app_label, migration.name) for migration in merge_migrations],
                })
                migration_name = "%04i_%s" % (
                    biggest_number + 1,
                    self.migration_name or ("merge_%s" % get_migration_name_timestamp())
                )
                new_migration = subclass(migration_name, app_label)
                writer = MigrationWriter(new_migration)
                if not self.dry_run:
                    # Write the merge migrations file to the disk
                    with open(writer.path, "w", encoding='utf-8') as fh:
                        fh.write(writer.as_string())
                    if self.verbosity > 0:
                        self.stdout.write("\nCreated new merge migration %s" % writer.path)
                elif self.verbosity == 3:
                    # Alternatively, makemigrations --merge --dry-run --verbosity 3
                    # will output the merge migrations to stdout rather than saving
                    # the file to the disk.
                    self.stdout.write(self.style.MIGRATE_HEADING(
                        "Full merge migrations file '%s':" % writer.filename) + "\n"
                    )
                    self.stdout.write("%s\n" % writer.as_string())
| bsd-3-clause |
Watkurem/Arch_Lab1 | tests.py | 1 | 36384 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2016 Alexander Melnyk / Олександр Мельник
#
# This file is part of Arch_Lab package.
#
# Arch_Lab is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# Arch_Lab is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# Arch_Lab. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
import unittest
import unittest.mock as mock
import io
import copy
import random
import datetime
import string
import configparser
import pickle
import yaml
import json
import lab
import engine
import pickle_backend
import yaml_backend
import json_backend
from interface import TerminalInterface
class TestSuccess(Exception):
    """Sentinel exception raised by mocked exit points (e.g. sys.exit) so a
    test can confirm the expected code path ran without the process dying."""
    pass
@mock.patch('pickle_backend.open')
class TestPickleBackend(unittest.TestCase):
    """Tests for pickle_backend.PickleFileBackend.

    The class-level patch replaces open() inside pickle_backend, so every
    test method receives the mock as ``mopen`` and no real file is touched.
    """

    # Backend under test and a simple picklable payload.
    fbk = pickle_backend.PickleFileBackend
    testval = [123, 123]

    def setUp(self):
        # In-memory stand-in for the binary file object open() would return.
        self.fakefil = io.BytesIO()

    def test_save(self, mopen):
        """save() must pickle the value into the opened file."""
        mopen().__enter__.return_value = self.fakefil
        self.fbk.save("/tmp/blah", self.testval)
        self.fakefil.seek(0)
        self.assertEqual(self.testval, pickle.load(self.fakefil))

    def test_read_correct(self, mopen):
        """load() must unpickle a value previously dumped to the file."""
        mopen().__enter__.return_value = self.fakefil
        pickle.dump(self.testval, self.fakefil)
        self.fakefil.seek(0)
        self.assertEqual(self.testval, self.fbk.load(self.fakefil))

    def test_read_EOFError(self, mopen):
        """A file left positioned at EOF must yield the empty default."""
        mopen().__enter__.return_value = self.fakefil
        pickle.dump(self.testval, self.fakefil)
        # Note: no seek(0) here — reading from the current position raises
        # EOFError, which load() is expected to translate into ([], []).
        self.assertEqual(([], []), self.fbk.load("/tmp/blah"))

    def test_read_FileNotFoundError(self, mopen):
        """A missing save file must also yield the empty default."""
        mopen.side_effect = FileNotFoundError()
        self.assertEqual(([], []), self.fbk.load("/tmp/blah"))
@mock.patch('yaml_backend.open')
class TestYamlBackend(unittest.TestCase):
    """Tests for yaml_backend.YamlFileBackend.

    The class-level patch replaces open() inside yaml_backend, so every
    test method receives the mock as ``mopen`` and all I/O goes through an
    in-memory StringIO.
    """

    # Backend under test and a simple serializable payload.
    fbk = yaml_backend.YamlFileBackend
    testval = [123, 123]

    def setUp(self):
        # In-memory stand-in for the text file object open() would return.
        self.fakefil = io.StringIO()

    def test_save(self, mopen):
        """save() must serialize the value as YAML into the opened file."""
        mopen().__enter__.return_value = self.fakefil
        self.fbk.save("/tmp/blah", self.testval)
        self.fakefil.seek(0)
        # Fixed: yaml.load() without an explicit Loader is deprecated since
        # PyYAML 5.1 and a TypeError in PyYAML >= 6.0; safe_load() is
        # equivalent for this plain-list payload and not a code-execution
        # hazard.
        self.assertEqual(self.testval, yaml.safe_load(self.fakefil))

    def test_read_correct(self, mopen):
        """load() must deserialize a value previously dumped to the file."""
        mopen().__enter__.return_value = self.fakefil
        yaml.dump(self.testval, self.fakefil)
        self.fakefil.seek(0)
        self.assertEqual(self.testval, self.fbk.load(self.fakefil))

    def test_read_EOFError(self, mopen):
        """A file left positioned at EOF must yield the empty default."""
        mopen().__enter__.return_value = self.fakefil
        yaml.dump(self.testval, self.fakefil)
        # Note: no seek(0) — load() reads nothing and must return ([], []).
        self.assertEqual(([], []), self.fbk.load("/tmp/blah"))

    def test_read_FileNotFoundError(self, mopen):
        """A missing save file must also yield the empty default."""
        mopen.side_effect = FileNotFoundError()
        self.assertEqual(([], []), self.fbk.load("/tmp/blah"))
@mock.patch('json_backend.open')
class TestJsonBackend(unittest.TestCase):
    """Tests for json_backend.JsonFileBackend.

    The class-level patch replaces open() inside json_backend, so every
    test method receives the mock as ``mopen``.
    """

    # Plain payload plus a pair of Task lists exercising the backend's
    # custom Task (de)serialization round trip.
    fbk = json_backend.JsonFileBackend
    testval = [123, 123]
    Task_testval = ([engine.Task('123', 123, 1, 1)],
                    [engine.Task('1234', 132, 11, 11)])

    def setUp(self):
        # In-memory stand-in for the text file object open() would return.
        self.fakefil = io.StringIO()

    def test_save_load_correct(self, mopen):
        """Task lists must survive a save()/load() round trip unchanged."""
        mopen().__enter__.return_value = self.fakefil
        self.fbk.save("/tmp/blah", self.Task_testval)
        self.fakefil.seek(0)
        self.assertEqual(self.Task_testval, self.fbk.load(self.fakefil))

    def test_save_not_Task(self, mopen):
        """Non-Task values must be written as plain JSON."""
        mopen().__enter__.return_value = self.fakefil
        self.fbk.save("/tmp/blah", self.testval)
        self.fakefil.seek(0)
        self.assertEqual(self.testval, json.load(self.fakefil))

    def test_read_EOFError(self, mopen):
        """A file left positioned at EOF must yield the empty default."""
        mopen().__enter__.return_value = self.fakefil
        json.dump(self.testval, self.fakefil)
        # No seek(0): load() reads nothing and must return ([], []).
        self.assertEqual(([], []), self.fbk.load("/tmp/blah"))

    def test_read_FileNotFoundError(self, mopen):
        """A missing save file must also yield the empty default."""
        mopen.side_effect = FileNotFoundError()
        self.assertEqual(([], []), self.fbk.load("/tmp/blah"))
class TestTerminalInterface(unittest.TestCase):
    """Tests for interface.TerminalInterface.

    Output-producing methods are checked by patching sys.stdout with a
    StringIO and comparing the captured text (including ANSI escape codes)
    against hand-built expected strings; input-consuming methods are checked
    by patching interface.input.
    """

    # Shared fixtures: a one-entry menu, a title, and three tasks dated
    # yesterday / today / tomorrow (to exercise the OVERDUE / Today markers).
    testopts = [["A", "abc"]]
    testtitle = "Blah"
    testdate = datetime.date.today()
    testtasks = [("abc", testdate - datetime.timedelta(days=1)),
                 ("XyZ", testdate),
                 ("", testdate + datetime.timedelta(days=1))]

    def test_init_TypeError(self):
        """TerminalInterface is static/abstract: instantiation must fail."""
        self.assertRaises(TypeError, TerminalInterface)

    @mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_welcome(self, mock_stdout):
        TerminalInterface.welcome()
        self.assertEqual(mock_stdout.getvalue(), " \x1b[1mWelcome to the " +
                         "Arch_Lab task planner!\x1b[0m" + '\n')

    @mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_menu(self, mock_stdout):
        TerminalInterface.menu(self.testopts, self.testtitle)
        correct_result = ("================================================================================\n"
                          "{}\n".format(self.testtitle))
        for x in self.testopts:
            correct_result += " [{0[0]}] {0[1]}\n".format(x)
        self.assertEqual(mock_stdout.getvalue(), correct_result)

    @mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_menu_decide_found(self, mock_stdout):
        # Pick a random menu entry and expect it back from menu_decide.
        c = random.randrange(0, len(self.testopts))
        ret = TerminalInterface.menu_decide(self.testopts, self.testopts[c][0])
        self.assertEqual(ret, self.testopts[c])

    def test_menu_decide_not_found(self):
        ret = TerminalInterface.menu_decide(self.testopts, "quack")
        self.assertEqual(ret, None)

    @mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_print_tasks_none_found(self, mock_stdout):
        """An empty task list prints the same placeholder either way."""
        correct_result = ("================================================================================\n"
                          "\t>> No tasks found <<\n")
        TerminalInterface.print_tasks([], False)
        self.assertEqual(mock_stdout.getvalue(), correct_result)
        mock_stdout.seek(0)
        TerminalInterface.print_tasks([], True)
        self.assertEqual(mock_stdout.getvalue(), correct_result)

    @mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_print_tasks_finished_true(self, mock_stdout):
        """Finished view: plain dates, no overdue/today markers."""
        TerminalInterface.print_tasks(self.testtasks, True)
        correct_result = ("================================================================================\n" +
                          "[0]\t " + self.testtasks[0][1].strftime("%d %b %Y, %A:") + '\n' +
                          " " + self.testtasks[0][0] + '\n\n'
                          "[1]\t " + self.testtasks[1][1].strftime("%d %b %Y, %A:") + '\n' +
                          " " + self.testtasks[1][0] + '\n\n'
                          "[2]\t " + self.testtasks[2][1].strftime("%d %b %Y, %A:") + '\n' +
                          " " + self.testtasks[2][0] + '\n')
        self.assertEqual(mock_stdout.getvalue(), correct_result)

    @mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_print_tasks_finished_false(self, mock_stdout):
        """Pending view: past dates flagged OVERDUE, today flagged Today."""
        TerminalInterface.print_tasks(self.testtasks, False)
        correct_result = ("================================================================================\n" +
                          "[0]\t " + self.testtasks[0][1].strftime("%d %b %Y, %A:") + ' \x1b[1;31m<< !!OVERDUE!!\x1b[0m\n' +
                          " " + self.testtasks[0][0] + '\n\n'
                          "[1]\t " + self.testtasks[1][1].strftime("%d %b %Y, %A:") + ' \x1b[1;32m<< Today!\x1b[0m\n' +
                          " " + self.testtasks[1][0] + '\n\n'
                          "[2]\t " + self.testtasks[2][1].strftime("%d %b %Y, %A:") + '\n' +
                          " " + self.testtasks[2][0] + '\n')
        self.assertEqual(mock_stdout.getvalue(), correct_result)

    @mock.patch('interface.TerminalInterface.print_tasks')
    def test_print_finished_tasks(self, mock_print_tasks):
        """print_finished_tasks must delegate with finished=True."""
        TerminalInterface.print_finished_tasks(self.testtasks)
        mock_print_tasks.assert_called_once_with(self.testtasks, True)

    @mock.patch('interface.input')
    @mock.patch('interface.TerminalInterface.menu')
    @mock.patch('interface.TerminalInterface.menu_decide')
    def test_finished_tasks_menu(self, mock_menu_decide,
                                 mock_menu, mock_input):
        # The menu must be shown with the finished-tasks title and the
        # user's raw input forwarded to menu_decide.
        foo = "foo"
        quack = "quack"
        mock_input.return_value = quack
        mock_menu_decide.return_value = foo
        self.assertEqual(foo,
                         TerminalInterface.finished_tasks_menu(self.testopts))
        mock_menu.assert_called_once_with(self.testopts,
                                          "You are viewing finished tasks")
        mock_menu_decide.assert_called_once_with(self.testopts, quack)

    @mock.patch('interface.TerminalInterface.print_tasks')
    def test_print_pending_tasks(self, mock_print_tasks):
        """print_pending_tasks must delegate with finished=False."""
        TerminalInterface.print_pending_tasks(self.testtasks)
        mock_print_tasks.assert_called_once_with(self.testtasks, False)

    @mock.patch('interface.input')
    @mock.patch('interface.TerminalInterface.menu')
    @mock.patch('interface.TerminalInterface.menu_decide')
    def test_pending_tasks_menu(self, mock_menu_decide,
                                mock_menu, mock_input):
        foo = "foo"
        quack = "quack"
        mock_input.return_value = quack
        mock_menu_decide.return_value = foo
        self.assertEqual(foo,
                         TerminalInterface.pending_tasks_menu(self.testopts))
        mock_menu.assert_called_once_with(self.testopts,
                                          "You are viewing pending tasks")
        mock_menu_decide.assert_called_once_with(self.testopts, quack)

    @mock.patch('interface.input')
    def test_ask_task_correct(self, mock_input):
        """Numeric input is converted to an int task id."""
        mock_input.return_value = "123"
        self.assertEqual(123, TerminalInterface.ask_task())

    @mock.patch('interface.input')
    def test_ask_task_wrong(self, mock_input):
        """Non-numeric input yields None."""
        mock_input.return_value = "a"
        self.assertEqual(None, TerminalInterface.ask_task())

    @mock.patch('interface.input')
    def test_task_input_correct(self, mock_input):
        """Description + ISO date are split into (content, y, m, d)."""
        mock_input.side_effect = ["Descript", "1234-01-01"]
        self.assertEqual(("Descript", 1234, 1, 1),
                         TerminalInterface.task_input())

    @mock.patch('interface.input')
    def test_task_input_wrong(self, mock_input):
        """A malformed date yields None date components."""
        mock_input.side_effect = ["", "1234-F7-01"]
        self.assertEqual(("", None, None, None),
                         TerminalInterface.task_input())

    @mock.patch('interface.TerminalInterface.task_input')
    def test_new_task_dialog(self, mock_task_input):
        """new_task_dialog is a thin wrapper over task_input."""
        mock_task_input.return_value = "quack"
        self.assertEqual("quack",
                         TerminalInterface.new_task_dialog())
        mock_task_input.assert_called_once_with()

    @mock.patch('interface.TerminalInterface.task_input')
    def test_edit_task_dialog(self, mock_task_input):
        """edit_task_dialog ignores its argument and wraps task_input."""
        mock_task_input.return_value = "quack"
        self.assertEqual("quack",
                         TerminalInterface.edit_task_dialog(random.randint))
        mock_task_input.assert_called_once_with()

    @mock.patch('interface.input')
    def test_bad_task(self, mock_input):
        mock_input.return_value = "a"
        self.assertEqual(None, TerminalInterface.bad_task())

    @mock.patch('interface.input')
    def test_bad_input(self, mock_input):
        mock_input.return_value = "a"
        self.assertEqual(None, TerminalInterface.bad_input())

    @mock.patch('interface.input')
    def test_save_dialog_no(self, mock_input):
        """Only 'n'/'N' decline saving."""
        mock_input.return_value = "n"
        self.assertEqual(False, TerminalInterface.save_dialog())
        mock_input.return_value = "N"
        self.assertEqual(False, TerminalInterface.save_dialog())

    @mock.patch('interface.input')
    def test_save_dialog_others(self, mock_input):
        """Any other printable character accepts saving."""
        cases = string.printable.replace("n", "").replace("N", "")
        for c in cases:
            mock_input.return_value = c
            self.assertEqual(True, TerminalInterface.save_dialog())

    @mock.patch('interface.input')
    def test_config_menu_correct(self, mock_input):
        """A known method key is returned as the new choice."""
        quack = "Quack"
        stls = (("asdf", "asfnoiewjndv"), ("123", "foobar"))
        cases = {x[0] for x in stls}
        for c in cases:
            mock_input.return_value = c
            self.assertEqual(c, TerminalInterface.config_menu(quack, stls))

    @mock.patch('interface.input')
    def test_config_menu_wrong(self, mock_input):
        """An unknown key keeps the current value."""
        quack = "Quack"
        stls = (("asdf", "asfnoiewjndv"), ("123", "foobar"))
        cases = ["Wrongchoice"]
        for c in cases:
            mock_input.return_value = c
            self.assertEqual(quack, TerminalInterface.config_menu(quack, stls))

    @mock.patch('interface.input')
    def test_config_menu_empty(self, mock_input):
        """Empty input keeps the current value."""
        quack = "Quack"
        stls = (("asdf", "asfnoiewjndv"), ("123", "foobar"))
        cases = [""]
        for c in cases:
            mock_input.return_value = c
            self.assertEqual(quack, TerminalInterface.config_menu(quack, stls))
class TestFileBackend(unittest.TestCase):
    """engine.FileBackend is an abstract base: direct instantiation must be
    refused and its file operations must stay unimplemented."""

    def test_init_TypeError(self):
        with self.assertRaises(TypeError):
            engine.FileBackend()

    def test_save_NotImplementedError(self):
        with self.assertRaises(NotImplementedError):
            engine.FileBackend.save(1, 1)

    def test_load_NotImplementedError(self):
        with self.assertRaises(NotImplementedError):
            engine.FileBackend.load(1)
class TestEngineConfig(unittest.TestCase):
    """Tests for engine.EngineConfig.

    ``self.t`` is a MagicMock standing in for an EngineConfig instance so
    that unbound methods can be exercised without running __init__'s real
    filesystem/config side effects. Backend modules are injected through
    mock.patch.dict on sys.modules.
    """

    def setUp(self):
        # Fake "self" for calling EngineConfig methods unbound.
        self.t = mock.MagicMock()

    def test_init_TypeError(self):
        """EngineConfig is abstract-ish: direct instantiation must fail
        (engine.type is patched out in the tests that need to bypass this)."""
        self.assertRaises(TypeError, engine.EngineConfig)

    @mock.patch('engine.type', new=lambda x: False)
    @mock.patch('engine.sys.exit')
    @mock.patch('engine.configparser.ConfigParser')
    def test_init_config_broken(self, mock_ConfigParser, mock_exit):
        """An unreadable/invalid config must terminate with exit code 1."""
        mock_exit.side_effect = TestSuccess
        with self.assertRaises(TestSuccess):
            engine.EngineConfig()
        mock_ConfigParser.assert_called_once_with()
        mock_exit.assert_called_once_with(1)

    @mock.patch('engine.type', new=lambda x: False)
    def test_init_config_pickle(self):
        """Default savemethod is pickle; backend + savefile must match."""
        mock_config = configparser.ConfigParser()
        mock_config.read = mock.MagicMock()
        mock_backend = mock.MagicMock()
        with mock.patch('engine.configparser.ConfigParser') as mock_CP:
            with mock.patch.dict('sys.modules', **{
                    'pickle_backend': mock_backend
            }):
                mock_CP.return_value = mock_config
                tmp = engine.EngineConfig()
                mock_CP.assert_called_once_with()
                self.assertEqual(tmp.config, mock_config)
                self.assertEqual(mock_config['DEFAULT']['savemethod'], 'pickle')
                self.assertEqual(tmp.file_backend, mock_backend.PickleFileBackend)
                self.assertEqual(tmp.savefile, lab.SAVEFILE + '.pkl')

    @mock.patch('engine.type', new=lambda x: False)
    def test_init_config_json(self):
        """A configured 'json' savemethod selects the JSON backend."""
        mock_config = configparser.ConfigParser()
        mock_config.read = mock.MagicMock()
        mock_config['DEFAULT']['savemethod'] = 'json'
        mock_backend = mock.MagicMock()
        with mock.patch('engine.configparser.ConfigParser') as mock_CP:
            with mock.patch.dict('sys.modules', **{
                    'json_backend': mock_backend
            }):
                mock_CP.return_value = mock_config
                tmp = engine.EngineConfig()
                mock_CP.assert_called_once_with()
                self.assertEqual(tmp.config, mock_config)
                self.assertEqual(mock_config['DEFAULT']['savemethod'], 'json')
                self.assertEqual(tmp.file_backend, mock_backend.JsonFileBackend)
                self.assertEqual(tmp.savefile, lab.SAVEFILE + '.json')

    @mock.patch('engine.type', new=lambda x: False)
    def test_init_config_yaml(self):
        """A configured 'yaml' savemethod selects the YAML backend."""
        mock_config = configparser.ConfigParser()
        mock_config.read = mock.MagicMock()
        mock_config['DEFAULT']['savemethod'] = 'yaml'
        mock_backend = mock.MagicMock()
        with mock.patch('engine.configparser.ConfigParser') as mock_CP:
            with mock.patch.dict('sys.modules', **{
                    'yaml_backend': mock_backend
            }):
                mock_CP.return_value = mock_config
                tmp = engine.EngineConfig()
                mock_CP.assert_called_once_with()
                self.assertEqual(tmp.config, mock_config)
                self.assertEqual(mock_config['DEFAULT']['savemethod'], 'yaml')
                self.assertEqual(tmp.file_backend, mock_backend.YamlFileBackend)
                self.assertEqual(tmp.savefile, lab.SAVEFILE + '.yaml')

    def test_get_savemethod(self):
        """get_savemethod must read config['DEFAULT']['savemethod']."""
        self.t.config = mock.MagicMock()
        self.t.testmeth = engine.EngineConfig.get_savemethod
        self.assertEqual(self.t.config['DEFAULT']['savemethod'],
                         self.t.testmeth(self.t))

    def test_get_available_savemethods(self):
        self.assertEqual(engine.AVAILABLE_SAVEMETHODS,
                         engine.EngineConfig.get_available_savemethods(self.t))

    @mock.patch('engine.sys.exit', side_effect=TestSuccess)
    @mock.patch('engine.open')
    def test_set_savemethod(self, mopen, mexit):
        """set_savemethod writes the config; unknown methods exit, known
        methods swap the active backend and savefile extension."""
        self.t.config = configparser.ConfigParser()
        self.t.config.write = mock.MagicMock()
        self.t.testmeth = engine.EngineConfig.set_savemethod
        mock_backend = mock.MagicMock()
        # Unknown method name: config is still written, then sys.exit fires.
        self.assertRaises(TestSuccess, self.t.testmeth, self.t, 'puckle')
        mopen.assert_called_with(lab.CONFIG, 'w')
        self.assertTrue(self.t.config.write.called)
        self.assertEqual(self.t.config['DEFAULT']['savemethod'], 'puckle')
        with mock.patch.dict('sys.modules', **{
                'pickle_backend': mock_backend
        }):
            self.t.testmeth(self.t, 'pickle')
            mopen.assert_called_with(lab.CONFIG, 'w')
            self.assertTrue(self.t.config.write.called)
            self.assertEqual(self.t.config['DEFAULT']['savemethod'], 'pickle')
            self.assertEqual(self.t.file_backend, mock_backend.PickleFileBackend)
            self.assertEqual(self.t.savefile, lab.SAVEFILE + '.pkl')
        with mock.patch.dict('sys.modules', **{
                'json_backend': mock_backend
        }):
            self.t.testmeth(self.t, 'json')
            mopen.assert_called_with(lab.CONFIG, 'w')
            self.assertTrue(self.t.config.write.called)
            self.assertEqual(self.t.config['DEFAULT']['savemethod'], 'json')
            self.assertEqual(self.t.file_backend, mock_backend.JsonFileBackend)
            self.assertEqual(self.t.savefile, lab.SAVEFILE + '.json')
        with mock.patch.dict('sys.modules', **{
                'yaml_backend': mock_backend
        }):
            self.t.testmeth(self.t, 'yaml')
            mopen.assert_called_with(lab.CONFIG, 'w')
            self.assertTrue(self.t.config.write.called)
            self.assertEqual(self.t.config['DEFAULT']['savemethod'], 'yaml')
            self.assertEqual(self.t.file_backend, mock_backend.YamlFileBackend)
            self.assertEqual(self.t.savefile, lab.SAVEFILE + '.yaml')
class TestListEngine(unittest.TestCase):
    """Tests for engine.ListEngine.

    ``self.t`` is a MagicMock used as the engine's "self" so each ListEngine
    method runs unbound against controlled pending/finished lists. The
    nested Quack class duck-types engine.Task (content, date, ordering,
    equality) without pulling in Task's own validation.
    """

    class Quack():
        # Minimal Task stand-in: just content + date with Task's comparison
        # semantics.
        def __init__(self, c, y, m, d):
            self.content = c
            self.date = datetime.date(y, m, d)

        def __lt__(self, other):
            return self.date < other.date

        def __eq__(self, other):
            return (self.content, self.date) == (other.content, other.date)

    # Fixture data: a raw load() payload plus sorted pending/finished lists.
    testval = (['abc', 'efg'], ['asp', 'hgkrf'])
    testpen = [Quack("123", 1, 1, 1), Quack("abc", 2000, 10, 10)]
    testfin = [Quack("", 537, 7, 27), Quack("xyz", 9999, 12, 30)]

    def setUp(self):
        # Deep copies so each test mutates its own lists, not the fixtures.
        self.t = mock.MagicMock()
        self.t.pending_task_list = copy.deepcopy(self.testpen)
        self.t.finished_task_list = copy.deepcopy(self.testfin)

    def test_init(self):
        """__init__ must populate both lists from the file backend."""
        self.t.testmeth = engine.ListEngine.__init__
        self.t.file_backend.load = mock.MagicMock()
        self.t.file_backend.load.return_value = self.testval
        with mock.patch('engine.super'):
            self.t.testmeth(self.t)
        self.assertEqual((self.t.pending_task_list,
                          self.t.finished_task_list),
                         self.testval)
        self.t.file_backend.load.assert_called_once_with(self.t.savefile)

    def test_view_pending_tasks(self):
        """Pending tasks are exposed as (content, date) tuples."""
        self.t.testmeth = engine.ListEngine.view_pending_tasks
        correct = [("123", datetime.date(1, 1, 1)),
                   ("abc", datetime.date(2000, 10, 10))]
        self.assertEqual(correct, self.t.testmeth(self.t))

    @mock.patch('engine.Task', new=Quack)
    def test_new_task(self):
        """New tasks are inserted keeping the pending list date-sorted."""
        self.t.testmeth = engine.ListEngine.new_task
        correct = [self.Quack("123", 1, 1, 1),
                   self.Quack("abc", 2000, 10, 10),
                   self.Quack("xyz", 9999, 12, 30)]
        self.t.testmeth(self.t, "xyz", 9999, 12, 30)
        self.assertEqual(correct, self.t.pending_task_list)

    def test_remove_pending_task(self):
        """Removal by index drops exactly that pending task."""
        self.t.testmeth = engine.ListEngine.remove_pending_task
        id = random.randrange(0, len(self.testpen))
        correct = self.testpen[:id] + self.testpen[id+1:]
        self.t.testmeth(self.t, id)
        self.assertEqual(correct, self.t.pending_task_list)

    def test_edit_pending_task(self):
        self.t.testmeth = engine.ListEngine.edit_pending_task
        correct = [self.Quack("123", 1, 1, 1),
                   self.Quack("xyz", 9999, 12, 30)]
        self.t.testmeth(self.t, 1, "xyz", 9999, 12, 30)
        self.assertEqual(correct, self.t.pending_task_list)

    def test_finish_task(self):
        """Finishing moves the task into the (sorted) finished list."""
        self.t.testmeth = engine.ListEngine.finish_task
        correct = ([self.Quack("123", 1, 1, 1)],
                   [self.Quack("", 537, 7, 27),
                    self.Quack("abc", 2000, 10, 10),
                    self.Quack("xyz", 9999, 12, 30)])
        self.t.testmeth(self.t, 1)
        self.assertEqual(correct, (self.t.pending_task_list,
                                   self.t.finished_task_list))

    def test_view_finished_tasks(self):
        self.t.testmeth = engine.ListEngine.view_finished_tasks
        correct = [("", datetime.date(537, 7, 27)),
                   ("xyz", datetime.date(9999, 12, 30))]
        self.assertEqual(correct, self.t.testmeth(self.t))

    def test_clear_finished_tasks(self):
        self.t.testmeth = engine.ListEngine.clear_finished_tasks
        correct = []
        self.t.testmeth(self.t)
        self.assertEqual(correct, self.t.finished_task_list)

    def test_remove_finished_task(self):
        self.t.testmeth = engine.ListEngine.remove_finished_task
        id = random.randrange(0, len(self.testpen))
        correct = self.testfin[:id] + self.testfin[id+1:]
        self.t.testmeth(self.t, id)
        self.assertEqual(correct, self.t.finished_task_list)

    def test_edit_finished_task(self):
        self.t.testmeth = engine.ListEngine.edit_finished_task
        correct = [self.Quack("123", 1, 1, 1),
                   self.Quack("xyz", 9999, 12, 30)]
        self.t.testmeth(self.t, 0, "123", 1, 1, 1)
        self.assertEqual(correct, self.t.finished_task_list)

    def test_unfinish_task(self):
        """Unfinishing moves the task back into the (sorted) pending list."""
        self.t.testmeth = engine.ListEngine.unfinish_task
        correct = ([self.Quack("123", 1, 1, 1),
                    self.Quack("", 537, 7, 27),
                    self.Quack("abc", 2000, 10, 10)],
                   [self.Quack("xyz", 9999, 12, 30)])
        self.t.testmeth(self.t, 0)
        self.assertEqual(correct, (self.t.pending_task_list,
                                   self.t.finished_task_list))

    def test_save_tasks(self):
        """save_tasks must hand both lists to the file backend."""
        self.t.file_backend.save = mock.MagicMock()
        self.t.savefile = mock.MagicMock()
        self.t.testmeth = engine.ListEngine.save_tasks
        self.t.testmeth(self.t)
        self.t.file_backend.save.assert_called_with(
            self.t.savefile,
            (self.t.pending_task_list,
             self.t.finished_task_list))

    def test_changes_detected_T(self):
        """Differing in-memory vs on-disk state reports changes."""
        self.t.file_backend.load = mock.MagicMock()
        self.t.file_backend.load.return_value = ([], [])
        self.t.testmeth = engine.ListEngine.changes_detected
        self.assertTrue(self.t.testmeth(self.t))
        self.t.file_backend.load.assert_called_with(self.t.savefile)

    def test_changes_detected_F(self):
        """Identical in-memory vs on-disk state reports no changes."""
        self.t.file_backend.load = mock.MagicMock()
        self.t.file_backend.load.return_value = (self.testpen, self.testfin)
        self.t.testmeth = engine.ListEngine.changes_detected
        self.assertFalse(self.t.testmeth(self.t))
        self.t.file_backend.load.assert_called_with(self.t.savefile)
class TestTask(unittest.TestCase):
    """Behavioural tests for engine.Task: construction validation,
    ordering, hashing, equality and repr."""

    def test_init(self):
        # Task(1, 1, 1, 1) — non-string content — must be rejected.
        self.assertRaises(TypeError, engine.Task, 1, 1, 1, 1)

    def test_lt(self):
        earlier = engine.Task("abc", 1, 1, 1)
        later = engine.Task("jewrovisa", 1, 1, 2)
        self.assertLess(earlier, later)

    def test_hash(self):
        task = engine.Task("abc", 1, 1, 1)
        expected = hash(("abc", datetime.date(1, 1, 1)))
        self.assertEqual(hash(task), expected)

    def test_eq_other_quacks_like_task(self):
        # Two distinct instances with identical content/date compare equal.
        first = engine.Task("abc", 1, 1, 1)
        second = engine.Task("abc", 1, 1, 1)
        self.assertEqual(first, second)
        self.assertIsNot(first, second)

    def test_eq_other_does_not_quack_like_task(self):
        task = engine.Task("abc", 1, 1, 1)
        not_a_task = ("abc", (1, 1, 1))
        with self.assertRaises(NotImplementedError):
            task == not_a_task

    def test_repr(self):
        task = engine.Task("abc", 1, 1, 1)
        self.assertEqual("Task('abc', 1, 1, 1)", repr(task))
class TestEngine(unittest.TestCase):
    """lab.Engine is an abstract interface: instantiation must fail and
    every operation must raise NotImplementedError when called directly."""

    def _assert_abstract(self, method, *args):
        # Shared helper: calling an abstract Engine method must raise.
        with self.assertRaises(NotImplementedError):
            method(*args)

    def test_init(self):
        with self.assertRaises(TypeError):
            lab.Engine()

    def test_view_pending_tasks(self):
        self._assert_abstract(lab.Engine.view_pending_tasks, None)

    def test_new_task(self):
        self._assert_abstract(lab.Engine.new_task,
                              None, None, None, None, None)

    def test_remove_pending_task(self):
        self._assert_abstract(lab.Engine.remove_pending_task, None, None)

    def test_edit_pending_task(self):
        self._assert_abstract(lab.Engine.edit_pending_task,
                              None, None, None, None, None, None)

    def test_finish_task(self):
        self._assert_abstract(lab.Engine.finish_task, None, None)

    def test_view_finished_tasks(self):
        self._assert_abstract(lab.Engine.view_finished_tasks, None)

    def test_clear_finished_tasks(self):
        self._assert_abstract(lab.Engine.clear_finished_tasks, None)

    def test_remove_finished_task(self):
        self._assert_abstract(lab.Engine.remove_finished_task, None, None)

    def test_edit_finished_task(self):
        self._assert_abstract(lab.Engine.edit_finished_task,
                              None, None, None, None, None, None)

    def test_unfinish_task(self):
        self._assert_abstract(lab.Engine.unfinish_task, None, None)

    def test_save_tasks(self):
        self._assert_abstract(lab.Engine.save_tasks, None)

    def test_get_savemethod(self):
        self._assert_abstract(lab.Engine.get_savemethod, None)

    def test_get_available_savemethods(self):
        self._assert_abstract(lab.Engine.get_available_savemethods, None)

    def test_set_savemethod(self):
        self._assert_abstract(lab.Engine.set_savemethod, None, None)

    def test_changes_detected(self):
        self._assert_abstract(lab.Engine.changes_detected, None)
class TestController(unittest.TestCase):
    """lab.Controller: constructor wiring plus abstract method stubs that
    must raise NotImplementedError."""

    def _assert_abstract(self, method):
        # Shared helper: calling an abstract Controller method must raise.
        with self.assertRaises(NotImplementedError):
            method(None)

    def test_init_TypeError(self):
        # Without the type() patch below, direct construction is refused.
        self.assertRaises(TypeError, lab.Controller, None, None)

    @mock.patch('lab.type')
    def test_init_normal(self, mtype):
        # With the abstractness check patched out, the constructor must
        # store both collaborators verbatim.
        iface, eng = "abc", "xyz"
        controller = lab.Controller(iface, eng)
        self.assertEqual(iface, controller.interface)
        self.assertEqual(eng, controller.engine)

    def test_run(self):
        self._assert_abstract(lab.Controller.run)

    def test_view_pending_tasks(self):
        self._assert_abstract(lab.Controller.view_pending_tasks)

    def test_add_new_task(self):
        self._assert_abstract(lab.Controller.add_new_task)

    def test_remove_pending_task(self):
        self._assert_abstract(lab.Controller.remove_pending_task)

    def test_edit_pending_task(self):
        self._assert_abstract(lab.Controller.edit_pending_task)

    def test_finish_task(self):
        self._assert_abstract(lab.Controller.finish_task)

    def test_view_config_pending(self):
        self._assert_abstract(lab.Controller.view_config_pending)

    def test_view_finished_tasks(self):
        self._assert_abstract(lab.Controller.view_finished_tasks)

    def test_clear_finished_tasks(self):
        self._assert_abstract(lab.Controller.clear_finished_tasks)

    def test_remove_finished_task(self):
        self._assert_abstract(lab.Controller.remove_finished_task)

    def test_edit_finished_task(self):
        self._assert_abstract(lab.Controller.edit_finished_task)

    def test_unfinish_task(self):
        self._assert_abstract(lab.Controller.unfinish_task)

    def test_view_config_finished(self):
        self._assert_abstract(lab.Controller.view_config_finished)

    def test_shutdown(self):
        self._assert_abstract(lab.Controller.shutdown)

    def test_save_dialog(self):
        self._assert_abstract(lab.Controller.save_dialog)
class TestInterface(unittest.TestCase):
    """lab.Interface is an abstract interface: instantiation must fail and
    every UI operation must raise NotImplementedError when called directly."""

    def _assert_abstract(self, method, *args):
        # Shared helper: calling an abstract Interface method must raise.
        with self.assertRaises(NotImplementedError):
            method(*args)

    def test_init(self):
        with self.assertRaises(TypeError):
            lab.Interface()

    def test_welcome(self):
        self._assert_abstract(lab.Interface.welcome)

    def test_print_finished_tasks(self):
        self._assert_abstract(lab.Interface.print_finished_tasks, None)

    def test_print_pending_tasks(self):
        self._assert_abstract(lab.Interface.print_pending_tasks, None)

    def test_finished_tasks_menu(self):
        self._assert_abstract(lab.Interface.finished_tasks_menu, None)

    def test_pending_tasks_menu(self):
        self._assert_abstract(lab.Interface.pending_tasks_menu, None)

    def test_ask_task(self):
        self._assert_abstract(lab.Interface.ask_task)

    def test_new_task_dialog(self):
        self._assert_abstract(lab.Interface.new_task_dialog)

    def test_edit_task_dialog(self):
        self._assert_abstract(lab.Interface.edit_task_dialog, None)

    def test_bad_task(self):
        self._assert_abstract(lab.Interface.bad_task)

    def test_bad_input(self):
        self._assert_abstract(lab.Interface.bad_input)

    def test_save_dialog(self):
        self._assert_abstract(lab.Interface.save_dialog)

    def test_config_menu(self):
        self._assert_abstract(lab.Interface.config_menu, None, None)
class TestLabMain(unittest.TestCase):
    """Tests for lab.main().

    The interface/engine/controller modules are replaced wholesale through
    mock.patch.dict on sys.modules, lab.open and lab.sys.exit are mocked
    (exit raising TestSuccess marks normal termination), and ConfigParser
    is swapped for a pre-seeded config.
    """

    @mock.patch('lab.open')
    @mock.patch('lab.sys.exit', side_effect=TestSuccess)
    def test_main_argument(self, mexit, mopen):
        """With no controller configured, main() defaults to 'argument' and
        wires up ArgumentController with the terminal interface."""
        mock_config = configparser.ConfigParser()
        mock_config.read = mock.MagicMock()
        m_interface = mock.MagicMock()
        m_controller = mock.MagicMock()
        m_engine = mock.MagicMock()
        with mock.patch('engine.configparser.ConfigParser') as mock_CP:
            with mock.patch.dict('sys.modules', **{
                    'interface': m_interface,
                    'engine': m_engine,
                    'controller': m_controller,
            }):
                mock_CP.return_value = mock_config
                self.assertRaises(TestSuccess, lab.main)
                self.assertEqual(mock_config['DEFAULT']['controller'], 'argument')
                mock_config.read.assert_called_with(lab.CONFIG)
                m_controller.ArgumentController.assert_called_with(
                    m_interface.TerminalInterface,
                    m_engine.ListEngine()
                )
                m_controller.ArgumentController.return_value.run.assert_called_with()

    @mock.patch('lab.open')
    @mock.patch('lab.sys.exit', side_effect=TestSuccess)
    def test_main_simple(self, mexit, mopen):
        """With controller preset to 'simple', main() must build and run
        SimpleController instead."""
        mock_config = configparser.ConfigParser()
        mock_config['DEFAULT']['controller'] = 'simple'
        mock_config.read = mock.MagicMock()
        m_interface = mock.MagicMock()
        m_controller = mock.MagicMock()
        m_engine = mock.MagicMock()
        with mock.patch('engine.configparser.ConfigParser') as mock_CP:
            with mock.patch.dict('sys.modules', **{
                    'interface': m_interface,
                    'engine': m_engine,
                    'controller': m_controller,
            }):
                mock_CP.return_value = mock_config
                self.assertRaises(TestSuccess, lab.main)
                self.assertEqual(mock_config['DEFAULT']['controller'], 'simple')
                mock_config.read.assert_called_with(lab.CONFIG)
                m_controller.SimpleController.assert_called_with(
                    m_interface.TerminalInterface,
                    m_engine.ListEngine()
                )
                m_controller.SimpleController.return_value.run.assert_called_with()
if __name__ == '__main__':
unittest.main(buffer=True)
| gpl-3.0 |
datasnakes/Datasnakes-Scripts | OrthoEvol/Tools/slackify/notify.py | 1 | 3182 | """Slackify sends messages or milestones to a slack channel."""
import configparser
import os
from slacker import Slacker
import slacker
class Slackify(object):
"""Send messages or milestones to a slack channel."""
def __init__(self, slackconfig='slackconfig.cfg', cfg=True):
"""Configure Slackify.
:param slackconfig: Path to config file.
:param cfg: Flag that is default True.
"""
config = configparser.ConfigParser()
# If there is not a config file
# Use False if you did not create a config file
if not cfg:
apikey = input('Insert your slack apikey here: ')
if len(apikey) != 42: # Standard length of slack apikey
raise ValueError('Your slack APIKEY is incorrect.')
slack = Slacker(apikey)
else:
# If there is a config file
if not os.path.isfile(slackconfig):
raise FileNotFoundError('Slack configuriation file not found.')
config.read(slackconfig)
# HINT Create a config file like the one described in the readme
apikey = config['APIKEYS']['slack']
slack = Slacker(apikey)
self.slack = slack
def _get_channel_id(self, channel):
"""Get a channel id for uploading files.
:param channel: Name of the channel to get an id for.
"""
channel_id = self.slack.channels.get_channel_id(channel)
return channel_id
def upload_file(self, file, channel):
"""Upload files (text/pdf/docx/log/image) to a slack channel.
:param file: Path to a file to upload.
:param channel: Name of the channel to upload the file to.
"""
channel_id = self._get_channel_id(channel)
self.slack.files.upload(file_=file, channels=channel_id)
def send_msg(self, channel, message):
"""Post a message to slack channel.
Send a message to a user using <@username>
:param channel: Name of the channel to send a message to.
:param message: Message to send to a channel.
"""
# With as user as True, the predefined bot name is used
try:
self.slack.chat.post_message(channel, message, as_user=True)
print('Your message was sent.')
except slacker.Error:
print('Your message was not sent!.')
# TODO add a traceback here.
# TODO also use contextlib
def list_users(self):
"""List all users for your slack organization."""
response = self.slack.users.list()
users = [username['name'] for username in response.body['members']]
return users
def list_channels(self):
"""List all channels for your slack organization."""
response = self.slack.channels.list()
channels = [channel['name'] for channel in response.body['channels']]
return channels
def log2slack(self):
"""Send a formatted text string to slack similar to logging."""
raise NotImplementedError('This function is not yet implemented.')
# TODO One day...create logging format for logging to slack.
| mit |
afedchin/xbmctorrent | resources/site-packages/bs4/testing.py | 440 | 24510 | """Helper classes for tests."""
import copy
import functools
import unittest
from unittest import TestCase
from bs4 import BeautifulSoup
from bs4.element import (
CharsetMetaAttributeValue,
Comment,
ContentMetaAttributeValue,
Doctype,
SoupStrainer,
)
from bs4.builder import HTMLParserTreeBuilder
default_builder = HTMLParserTreeBuilder
class SoupTest(unittest.TestCase):
@property
def default_builder(self):
return default_builder()
def soup(self, markup, **kwargs):
"""Build a Beautiful Soup object from markup."""
builder = kwargs.pop('builder', self.default_builder)
return BeautifulSoup(markup, builder=builder, **kwargs)
def document_for(self, markup):
"""Turn an HTML fragment into a document.
The details depend on the builder.
"""
return self.default_builder.test_fragment_to_document(markup)
def assertSoupEquals(self, to_parse, compare_parsed_to=None):
builder = self.default_builder
obj = BeautifulSoup(to_parse, builder=builder)
if compare_parsed_to is None:
compare_parsed_to = to_parse
self.assertEqual(obj.decode(), self.document_for(compare_parsed_to))
class HTMLTreeBuilderSmokeTest(object):
"""A basic test of a treebuilder's competence.
Any HTML treebuilder, present or future, should be able to pass
these tests. With invalid markup, there's room for interpretation,
and different parsers can handle it differently. But with the
markup in these tests, there's not much room for interpretation.
"""
def assertDoctypeHandled(self, doctype_fragment):
"""Assert that a given doctype string is handled correctly."""
doctype_str, soup = self._document_with_doctype(doctype_fragment)
# Make sure a Doctype object was created.
doctype = soup.contents[0]
self.assertEqual(doctype.__class__, Doctype)
self.assertEqual(doctype, doctype_fragment)
self.assertEqual(str(soup)[:len(doctype_str)], doctype_str)
# Make sure that the doctype was correctly associated with the
# parse tree and that the rest of the document parsed.
self.assertEqual(soup.p.contents[0], 'foo')
def _document_with_doctype(self, doctype_fragment):
"""Generate and parse a document with the given doctype."""
doctype = '<!DOCTYPE %s>' % doctype_fragment
markup = doctype + '\n<p>foo</p>'
soup = self.soup(markup)
return doctype, soup
def test_normal_doctypes(self):
"""Make sure normal, everyday HTML doctypes are handled correctly."""
self.assertDoctypeHandled("html")
self.assertDoctypeHandled(
'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"')
def test_empty_doctype(self):
soup = self.soup("<!DOCTYPE>")
doctype = soup.contents[0]
self.assertEqual("", doctype.strip())
def test_public_doctype_with_url(self):
doctype = 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"'
self.assertDoctypeHandled(doctype)
def test_system_doctype(self):
self.assertDoctypeHandled('foo SYSTEM "http://www.example.com/"')
def test_namespaced_system_doctype(self):
# We can handle a namespaced doctype with a system ID.
self.assertDoctypeHandled('xsl:stylesheet SYSTEM "htmlent.dtd"')
def test_namespaced_public_doctype(self):
# Test a namespaced doctype with a public id.
self.assertDoctypeHandled('xsl:stylesheet PUBLIC "htmlent.dtd"')
def test_real_xhtml_document(self):
"""A real XHTML document should come out more or less the same as it went in."""
markup = b"""<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><title>Hello.</title></head>
<body>Goodbye.</body>
</html>"""
soup = self.soup(markup)
self.assertEqual(
soup.encode("utf-8").replace(b"\n", b""),
markup.replace(b"\n", b""))
def test_deepcopy(self):
"""Make sure you can copy the tree builder.
This is important because the builder is part of a
BeautifulSoup object, and we want to be able to copy that.
"""
copy.deepcopy(self.default_builder)
def test_p_tag_is_never_empty_element(self):
"""A <p> tag is never designated as an empty-element tag.
Even if the markup shows it as an empty-element tag, it
shouldn't be presented that way.
"""
soup = self.soup("<p/>")
self.assertFalse(soup.p.is_empty_element)
self.assertEqual(str(soup.p), "<p></p>")
def test_unclosed_tags_get_closed(self):
"""A tag that's not closed by the end of the document should be closed.
This applies to all tags except empty-element tags.
"""
self.assertSoupEquals("<p>", "<p></p>")
self.assertSoupEquals("<b>", "<b></b>")
self.assertSoupEquals("<br>", "<br/>")
def test_br_is_always_empty_element_tag(self):
"""A <br> tag is designated as an empty-element tag.
Some parsers treat <br></br> as one <br/> tag, some parsers as
two tags, but it should always be an empty-element tag.
"""
soup = self.soup("<br></br>")
self.assertTrue(soup.br.is_empty_element)
self.assertEqual(str(soup.br), "<br/>")
def test_nested_formatting_elements(self):
self.assertSoupEquals("<em><em></em></em>")
def test_comment(self):
# Comments are represented as Comment objects.
markup = "<p>foo<!--foobar-->baz</p>"
self.assertSoupEquals(markup)
soup = self.soup(markup)
comment = soup.find(text="foobar")
self.assertEqual(comment.__class__, Comment)
# The comment is properly integrated into the tree.
foo = soup.find(text="foo")
self.assertEqual(comment, foo.next_element)
baz = soup.find(text="baz")
self.assertEqual(comment, baz.previous_element)
def test_preserved_whitespace_in_pre_and_textarea(self):
"""Whitespace must be preserved in <pre> and <textarea> tags."""
self.assertSoupEquals("<pre> </pre>")
self.assertSoupEquals("<textarea> woo </textarea>")
def test_nested_inline_elements(self):
"""Inline elements can be nested indefinitely."""
b_tag = "<b>Inside a B tag</b>"
self.assertSoupEquals(b_tag)
nested_b_tag = "<p>A <i>nested <b>tag</b></i></p>"
self.assertSoupEquals(nested_b_tag)
double_nested_b_tag = "<p>A <a>doubly <i>nested <b>tag</b></i></a></p>"
self.assertSoupEquals(nested_b_tag)
def test_nested_block_level_elements(self):
"""Block elements can be nested."""
soup = self.soup('<blockquote><p><b>Foo</b></p></blockquote>')
blockquote = soup.blockquote
self.assertEqual(blockquote.p.b.string, 'Foo')
self.assertEqual(blockquote.b.string, 'Foo')
def test_correctly_nested_tables(self):
"""One table can go inside another one."""
markup = ('<table id="1">'
'<tr>'
"<td>Here's another table:"
'<table id="2">'
'<tr><td>foo</td></tr>'
'</table></td>')
self.assertSoupEquals(
markup,
'<table id="1"><tr><td>Here\'s another table:'
'<table id="2"><tr><td>foo</td></tr></table>'
'</td></tr></table>')
self.assertSoupEquals(
"<table><thead><tr><td>Foo</td></tr></thead>"
"<tbody><tr><td>Bar</td></tr></tbody>"
"<tfoot><tr><td>Baz</td></tr></tfoot></table>")
def test_deeply_nested_multivalued_attribute(self):
# html5lib can set the attributes of the same tag many times
# as it rearranges the tree. This has caused problems with
# multivalued attributes.
markup = '<table><div><div class="css"></div></div></table>'
soup = self.soup(markup)
self.assertEqual(["css"], soup.div.div['class'])
def test_angle_brackets_in_attribute_values_are_escaped(self):
self.assertSoupEquals('<a b="<a>"></a>', '<a b="<a>"></a>')
def test_entities_in_attributes_converted_to_unicode(self):
expect = u'<p id="pi\N{LATIN SMALL LETTER N WITH TILDE}ata"></p>'
self.assertSoupEquals('<p id="piñata"></p>', expect)
self.assertSoupEquals('<p id="piñata"></p>', expect)
self.assertSoupEquals('<p id="piñata"></p>', expect)
self.assertSoupEquals('<p id="piñata"></p>', expect)
def test_entities_in_text_converted_to_unicode(self):
expect = u'<p>pi\N{LATIN SMALL LETTER N WITH TILDE}ata</p>'
self.assertSoupEquals("<p>piñata</p>", expect)
self.assertSoupEquals("<p>piñata</p>", expect)
self.assertSoupEquals("<p>piñata</p>", expect)
self.assertSoupEquals("<p>piñata</p>", expect)
def test_quot_entity_converted_to_quotation_mark(self):
self.assertSoupEquals("<p>I said "good day!"</p>",
'<p>I said "good day!"</p>')
def test_out_of_range_entity(self):
expect = u"\N{REPLACEMENT CHARACTER}"
self.assertSoupEquals("�", expect)
self.assertSoupEquals("�", expect)
self.assertSoupEquals("�", expect)
def test_multipart_strings(self):
"Mostly to prevent a recurrence of a bug in the html5lib treebuilder."
soup = self.soup("<html><h2>\nfoo</h2><p></p></html>")
self.assertEqual("p", soup.h2.string.next_element.name)
self.assertEqual("p", soup.p.name)
def test_basic_namespaces(self):
"""Parsers don't need to *understand* namespaces, but at the
very least they should not choke on namespaces or lose
data."""
markup = b'<html xmlns="http://www.w3.org/1999/xhtml" xmlns:mathml="http://www.w3.org/1998/Math/MathML" xmlns:svg="http://www.w3.org/2000/svg"><head></head><body><mathml:msqrt>4</mathml:msqrt><b svg:fill="red"></b></body></html>'
soup = self.soup(markup)
self.assertEqual(markup, soup.encode())
html = soup.html
self.assertEqual('http://www.w3.org/1999/xhtml', soup.html['xmlns'])
self.assertEqual(
'http://www.w3.org/1998/Math/MathML', soup.html['xmlns:mathml'])
self.assertEqual(
'http://www.w3.org/2000/svg', soup.html['xmlns:svg'])
def test_multivalued_attribute_value_becomes_list(self):
markup = b'<a class="foo bar">'
soup = self.soup(markup)
self.assertEqual(['foo', 'bar'], soup.a['class'])
#
# Generally speaking, tests below this point are more tests of
# Beautiful Soup than tests of the tree builders. But parsers are
# weird, so we run these tests separately for every tree builder
# to detect any differences between them.
#
def test_can_parse_unicode_document(self):
# A seemingly innocuous document... but it's in Unicode! And
# it contains characters that can't be represented in the
# encoding found in the declaration! The horror!
markup = u'<html><head><meta encoding="euc-jp"></head><body>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</body>'
soup = self.soup(markup)
self.assertEqual(u'Sacr\xe9 bleu!', soup.body.string)
def test_soupstrainer(self):
"""Parsers should be able to work with SoupStrainers."""
strainer = SoupStrainer("b")
soup = self.soup("A <b>bold</b> <meta/> <i>statement</i>",
parse_only=strainer)
self.assertEqual(soup.decode(), "<b>bold</b>")
def test_single_quote_attribute_values_become_double_quotes(self):
self.assertSoupEquals("<foo attr='bar'></foo>",
'<foo attr="bar"></foo>')
def test_attribute_values_with_nested_quotes_are_left_alone(self):
text = """<foo attr='bar "brawls" happen'>a</foo>"""
self.assertSoupEquals(text)
def test_attribute_values_with_double_nested_quotes_get_quoted(self):
text = """<foo attr='bar "brawls" happen'>a</foo>"""
soup = self.soup(text)
soup.foo['attr'] = 'Brawls happen at "Bob\'s Bar"'
self.assertSoupEquals(
soup.foo.decode(),
"""<foo attr="Brawls happen at "Bob\'s Bar"">a</foo>""")
def test_ampersand_in_attribute_value_gets_escaped(self):
self.assertSoupEquals('<this is="really messed up & stuff"></this>',
'<this is="really messed up & stuff"></this>')
self.assertSoupEquals(
'<a href="http://example.org?a=1&b=2;3">foo</a>',
'<a href="http://example.org?a=1&b=2;3">foo</a>')
def test_escaped_ampersand_in_attribute_value_is_left_alone(self):
self.assertSoupEquals('<a href="http://example.org?a=1&b=2;3"></a>')
def test_entities_in_strings_converted_during_parsing(self):
# Both XML and HTML entities are converted to Unicode characters
# during parsing.
text = "<p><<sacré bleu!>></p>"
expected = u"<p><<sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></p>"
self.assertSoupEquals(text, expected)
def test_smart_quotes_converted_on_the_way_in(self):
# Microsoft smart quotes are converted to Unicode characters during
# parsing.
quote = b"<p>\x91Foo\x92</p>"
soup = self.soup(quote)
self.assertEqual(
soup.p.string,
u"\N{LEFT SINGLE QUOTATION MARK}Foo\N{RIGHT SINGLE QUOTATION MARK}")
def test_non_breaking_spaces_converted_on_the_way_in(self):
soup = self.soup("<a> </a>")
self.assertEqual(soup.a.string, u"\N{NO-BREAK SPACE}" * 2)
def test_entities_converted_on_the_way_out(self):
text = "<p><<sacré bleu!>></p>"
expected = u"<p><<sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></p>".encode("utf-8")
soup = self.soup(text)
self.assertEqual(soup.p.encode("utf-8"), expected)
def test_real_iso_latin_document(self):
# Smoke test of interrelated functionality, using an
# easy-to-understand document.
# Here it is in Unicode. Note that it claims to be in ISO-Latin-1.
unicode_html = u'<html><head><meta content="text/html; charset=ISO-Latin-1" http-equiv="Content-type"/></head><body><p>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</p></body></html>'
# That's because we're going to encode it into ISO-Latin-1, and use
# that to test.
iso_latin_html = unicode_html.encode("iso-8859-1")
# Parse the ISO-Latin-1 HTML.
soup = self.soup(iso_latin_html)
# Encode it to UTF-8.
result = soup.encode("utf-8")
# What do we expect the result to look like? Well, it would
# look like unicode_html, except that the META tag would say
# UTF-8 instead of ISO-Latin-1.
expected = unicode_html.replace("ISO-Latin-1", "utf-8")
# And, of course, it would be in UTF-8, not Unicode.
expected = expected.encode("utf-8")
# Ta-da!
self.assertEqual(result, expected)
def test_real_shift_jis_document(self):
# Smoke test to make sure the parser can handle a document in
# Shift-JIS encoding, without choking.
shift_jis_html = (
b'<html><head></head><body><pre>'
b'\x82\xb1\x82\xea\x82\xcdShift-JIS\x82\xc5\x83R\x81[\x83f'
b'\x83B\x83\x93\x83O\x82\xb3\x82\xea\x82\xbd\x93\xfa\x96{\x8c'
b'\xea\x82\xcc\x83t\x83@\x83C\x83\x8b\x82\xc5\x82\xb7\x81B'
b'</pre></body></html>')
unicode_html = shift_jis_html.decode("shift-jis")
soup = self.soup(unicode_html)
# Make sure the parse tree is correctly encoded to various
# encodings.
self.assertEqual(soup.encode("utf-8"), unicode_html.encode("utf-8"))
self.assertEqual(soup.encode("euc_jp"), unicode_html.encode("euc_jp"))
def test_real_hebrew_document(self):
# A real-world test to make sure we can convert ISO-8859-9 (a
# Hebrew encoding) to UTF-8.
hebrew_document = b'<html><head><title>Hebrew (ISO 8859-8) in Visual Directionality</title></head><body><h1>Hebrew (ISO 8859-8) in Visual Directionality</h1>\xed\xe5\xec\xf9</body></html>'
soup = self.soup(
hebrew_document, from_encoding="iso8859-8")
self.assertEqual(soup.original_encoding, 'iso8859-8')
self.assertEqual(
soup.encode('utf-8'),
hebrew_document.decode("iso8859-8").encode("utf-8"))
def test_meta_tag_reflects_current_encoding(self):
# Here's the <meta> tag saying that a document is
# encoded in Shift-JIS.
meta_tag = ('<meta content="text/html; charset=x-sjis" '
'http-equiv="Content-type"/>')
# Here's a document incorporating that meta tag.
shift_jis_html = (
'<html><head>\n%s\n'
'<meta http-equiv="Content-language" content="ja"/>'
'</head><body>Shift-JIS markup goes here.') % meta_tag
soup = self.soup(shift_jis_html)
# Parse the document, and the charset is seemingly unaffected.
parsed_meta = soup.find('meta', {'http-equiv': 'Content-type'})
content = parsed_meta['content']
self.assertEqual('text/html; charset=x-sjis', content)
# But that value is actually a ContentMetaAttributeValue object.
self.assertTrue(isinstance(content, ContentMetaAttributeValue))
# And it will take on a value that reflects its current
# encoding.
self.assertEqual('text/html; charset=utf8', content.encode("utf8"))
# For the rest of the story, see TestSubstitutions in
# test_tree.py.
def test_html5_style_meta_tag_reflects_current_encoding(self):
# Here's the <meta> tag saying that a document is
# encoded in Shift-JIS.
meta_tag = ('<meta id="encoding" charset="x-sjis" />')
# Here's a document incorporating that meta tag.
shift_jis_html = (
'<html><head>\n%s\n'
'<meta http-equiv="Content-language" content="ja"/>'
'</head><body>Shift-JIS markup goes here.') % meta_tag
soup = self.soup(shift_jis_html)
# Parse the document, and the charset is seemingly unaffected.
parsed_meta = soup.find('meta', id="encoding")
charset = parsed_meta['charset']
self.assertEqual('x-sjis', charset)
# But that value is actually a CharsetMetaAttributeValue object.
self.assertTrue(isinstance(charset, CharsetMetaAttributeValue))
# And it will take on a value that reflects its current
# encoding.
self.assertEqual('utf8', charset.encode("utf8"))
def test_tag_with_no_attributes_can_have_attributes_added(self):
data = self.soup("<a>text</a>")
data.a['foo'] = 'bar'
self.assertEqual('<a foo="bar">text</a>', data.a.decode())
class XMLTreeBuilderSmokeTest(object):
def test_docstring_generated(self):
soup = self.soup("<root/>")
self.assertEqual(
soup.encode(), b'<?xml version="1.0" encoding="utf-8"?>\n<root/>')
def test_real_xhtml_document(self):
"""A real XHTML document should come out *exactly* the same as it went in."""
markup = b"""<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><title>Hello.</title></head>
<body>Goodbye.</body>
</html>"""
soup = self.soup(markup)
self.assertEqual(
soup.encode("utf-8"), markup)
def test_formatter_processes_script_tag_for_xml_documents(self):
doc = """
<script type="text/javascript">
</script>
"""
soup = BeautifulSoup(doc, "xml")
# lxml would have stripped this while parsing, but we can add
# it later.
soup.script.string = 'console.log("< < hey > > ");'
encoded = soup.encode()
self.assertTrue(b"< < hey > >" in encoded)
def test_can_parse_unicode_document(self):
markup = u'<?xml version="1.0" encoding="euc-jp"><root>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</root>'
soup = self.soup(markup)
self.assertEqual(u'Sacr\xe9 bleu!', soup.root.string)
def test_popping_namespaced_tag(self):
markup = '<rss xmlns:dc="foo"><dc:creator>b</dc:creator><dc:date>2012-07-02T20:33:42Z</dc:date><dc:rights>c</dc:rights><image>d</image></rss>'
soup = self.soup(markup)
self.assertEqual(
unicode(soup.rss), markup)
def test_docstring_includes_correct_encoding(self):
soup = self.soup("<root/>")
self.assertEqual(
soup.encode("latin1"),
b'<?xml version="1.0" encoding="latin1"?>\n<root/>')
def test_large_xml_document(self):
"""A large XML document should come out the same as it went in."""
markup = (b'<?xml version="1.0" encoding="utf-8"?>\n<root>'
+ b'0' * (2**12)
+ b'</root>')
soup = self.soup(markup)
self.assertEqual(soup.encode("utf-8"), markup)
def test_tags_are_empty_element_if_and_only_if_they_are_empty(self):
self.assertSoupEquals("<p>", "<p/>")
self.assertSoupEquals("<p>foo</p>")
def test_namespaces_are_preserved(self):
markup = '<root xmlns:a="http://example.com/" xmlns:b="http://example.net/"><a:foo>This tag is in the a namespace</a:foo><b:foo>This tag is in the b namespace</b:foo></root>'
soup = self.soup(markup)
root = soup.root
self.assertEqual("http://example.com/", root['xmlns:a'])
self.assertEqual("http://example.net/", root['xmlns:b'])
def test_closing_namespaced_tag(self):
markup = '<p xmlns:dc="http://purl.org/dc/elements/1.1/"><dc:date>20010504</dc:date></p>'
soup = self.soup(markup)
self.assertEqual(unicode(soup.p), markup)
def test_namespaced_attributes(self):
markup = '<foo xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><bar xsi:schemaLocation="http://www.example.com"/></foo>'
soup = self.soup(markup)
self.assertEqual(unicode(soup.foo), markup)
def test_namespaced_attributes_xml_namespace(self):
markup = '<foo xml:lang="fr">bar</foo>'
soup = self.soup(markup)
self.assertEqual(unicode(soup.foo), markup)
class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest):
"""Smoke test for a tree builder that supports HTML5."""
def test_real_xhtml_document(self):
# Since XHTML is not HTML5, HTML5 parsers are not tested to handle
# XHTML documents in any particular way.
pass
def test_html_tags_have_namespace(self):
markup = "<a>"
soup = self.soup(markup)
self.assertEqual("http://www.w3.org/1999/xhtml", soup.a.namespace)
def test_svg_tags_have_namespace(self):
markup = '<svg><circle/></svg>'
soup = self.soup(markup)
namespace = "http://www.w3.org/2000/svg"
self.assertEqual(namespace, soup.svg.namespace)
self.assertEqual(namespace, soup.circle.namespace)
def test_mathml_tags_have_namespace(self):
markup = '<math><msqrt>5</msqrt></math>'
soup = self.soup(markup)
namespace = 'http://www.w3.org/1998/Math/MathML'
self.assertEqual(namespace, soup.math.namespace)
self.assertEqual(namespace, soup.msqrt.namespace)
def test_xml_declaration_becomes_comment(self):
markup = '<?xml version="1.0" encoding="utf-8"?><html></html>'
soup = self.soup(markup)
self.assertTrue(isinstance(soup.contents[0], Comment))
self.assertEqual(soup.contents[0], '?xml version="1.0" encoding="utf-8"?')
self.assertEqual("html", soup.contents[0].next_element.name)
def skipIf(condition, reason):
def nothing(test, *args, **kwargs):
return None
def decorator(test_item):
if condition:
return nothing
else:
return test_item
return decorator
| gpl-3.0 |
wilhelmryan/Arduino | arduino-core/src/processing/app/i18n/python/requests/packages/urllib3/packages/six.py | 2375 | 11628 | """Utilities for writing code that runs on Python 2 and 3"""
#Copyright (c) 2010-2011 Benjamin Peterson
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.2.0" # Revision 41c74fef2ded
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result)
# This is a bit ugly, but it avoids running this again.
delattr(tp, self.name)
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _MovedItems(types.ModuleType):
"""Lazy loading of moved objects"""
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
del attr
moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_iterkeys = "keys"
_itervalues = "values"
_iteritems = "items"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_code = "func_code"
_func_defaults = "func_defaults"
_iterkeys = "iterkeys"
_itervalues = "itervalues"
_iteritems = "iteritems"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
if PY3:
    def get_unbound_function(unbound):
        # Python 3 has no unbound methods; the function is returned as-is.
        return unbound
    # Base class for portable iterators: Python 3 uses __next__ natively.
    Iterator = object
    def callable(obj):
        # callable() was removed in Python 3.0-3.1; emulate it via the MRO.
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
else:
    def get_unbound_function(unbound):
        # Unwrap the plain function from a Python 2 unbound method.
        return unbound.im_func
    class Iterator(object):
        # Bridge: subclasses define __next__; expose it as next() for Py2.
        def next(self):
            return type(self).__next__(self)
    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")
# Version-independent accessors for method/function internals, built from
# the per-version attribute-name tables above.
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
def iterkeys(d):
    """Return an iterator over the keys of a dictionary."""
    method = getattr(d, _iterkeys)
    return iter(method())


def itervalues(d):
    """Return an iterator over the values of a dictionary."""
    method = getattr(d, _itervalues)
    return iter(method())


def iteritems(d):
    """Return an iterator over the (key, value) pairs of a dictionary."""
    method = getattr(d, _iteritems)
    return iter(method())
if PY3:
    def b(s):
        # Byte literal: latin-1 maps code points 0-255 straight to bytes.
        return s.encode("latin-1")
    def u(s):
        # Python 3 source strings are already text.
        return s
    if sys.version_info[1] <= 1:
        def int2byte(i):
            return bytes((i,))
    else:
        # This is about 2x faster than the implementation above on 3.2+
        int2byte = operator.methodcaller("to_bytes", 1, "big")
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
else:
    def b(s):
        return s
    def u(s):
        # Interpret \uXXXX escapes inside the native (byte) string.
        return unicode(s, "unicode_escape")
    int2byte = chr
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
    import builtins
    # exec and print are statements (keywords) in Python 2, so referencing
    # them directly would be a SyntaxError; fetch the builtins by name.
    exec_ = getattr(builtins, "exec")
    def reraise(tp, value, tb=None):
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
    print_ = getattr(builtins, "print")
    del builtins
else:
    def exec_(code, globs=None, locs=None):
        """Execute code in a namespace."""
        if globs is None:
            # Default to the caller's globals/locals, mirroring the Py2
            # bare ``exec`` statement.
            frame = sys._getframe(1)
            globs = frame.f_globals
            if locs is None:
                locs = frame.f_locals
            del frame
        elif locs is None:
            locs = globs
        exec("""exec code in globs, locs""")
    # The three-argument raise form is a SyntaxError under Python 3, so it
    # is hidden inside a string executed only on Python 2.
    exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")
    def print_(*args, **kwargs):
        """The new-style print function."""
        # Emulates Python 3's print() on Python 2: supports the file/sep/end
        # keyword arguments and promotes the whole line to unicode when any
        # piece of it is unicode.
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return
        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            fp.write(data)
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        if not want_unicode:
            # Any unicode positional argument also forces a unicode line.
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)
_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, base=object):
    """Return a throw-away base class whose metaclass is *meta*.

    Subclassing the result gives the subclass the metaclass *meta* on both
    Python 2 and Python 3 without version-specific class syntax.
    """
    namespace = {}
    return meta("NewBase", (base,), namespace)
| lgpl-2.1 |
eul-721/The-Perfect-Pokemon-Team-Balancer | libs/env/Lib/encodings/hp_roman8.py | 647 | 7391 | """ Python Character Mapping Codec generated from 'hp_roman8.txt' with gencodec.py.
Based on data from ftp://dkuug.dk/i18n/charmaps/HP-ROMAN8 (Keld Simonsen)
Original source: LaserJet IIP Printer User's Manual HP part no
33471-90901, Hewlet-Packard, June 1989.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless HP Roman-8 codec built on the charmap helpers."""

    def encode(self, input, errors='strict'):
        return codecs.charmap_encode(input, errors, encoding_map)

    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_map)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; the charmap codec keeps no state between calls."""

    def encode(self, input, final=False):
        data, _consumed = codecs.charmap_encode(input, self.errors, encoding_map)
        return data
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; the charmap codec keeps no state between calls."""

    def decode(self, input, final=False):
        text, _consumed = codecs.charmap_decode(input, self.errors, decoding_map)
        return text
class StreamWriter(Codec,codecs.StreamWriter):
    # Inherits encode() from Codec; StreamWriter supplies the file plumbing.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Inherits decode() from Codec; StreamReader supplies the file plumbing.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record used by the encodings package."""
    codec = Codec()
    return codecs.CodecInfo(
        name='hp-roman8',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x00a1: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x00a2: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00a3: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x00a4: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00a5: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00a6: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00a7: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00a8: 0x00b4, # ACUTE ACCENT
0x00a9: 0x02cb, # MODIFIER LETTER GRAVE ACCENT (Mandarin Chinese fourth tone)
0x00aa: 0x02c6, # MODIFIER LETTER CIRCUMFLEX ACCENT
0x00ab: 0x00a8, # DIAERESIS
0x00ac: 0x02dc, # SMALL TILDE
0x00ad: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x00ae: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00af: 0x20a4, # LIRA SIGN
0x00b0: 0x00af, # MACRON
0x00b1: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00b2: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x00b3: 0x00b0, # DEGREE SIGN
0x00b4: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00b5: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x00b6: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00b7: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00b8: 0x00a1, # INVERTED EXCLAMATION MARK
0x00b9: 0x00bf, # INVERTED QUESTION MARK
0x00ba: 0x00a4, # CURRENCY SIGN
0x00bb: 0x00a3, # POUND SIGN
0x00bc: 0x00a5, # YEN SIGN
0x00bd: 0x00a7, # SECTION SIGN
0x00be: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00bf: 0x00a2, # CENT SIGN
0x00c0: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00c1: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00c2: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00c3: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00c4: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00c5: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x00c6: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00c7: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00c8: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x00c9: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x00ca: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x00cb: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x00cc: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x00cd: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ce: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x00cf: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x00d0: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00d1: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00d2: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x00d3: 0x00c6, # LATIN CAPITAL LETTER AE
0x00d4: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x00d5: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00d6: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x00d7: 0x00e6, # LATIN SMALL LETTER AE
0x00d8: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00d9: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x00da: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00db: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00dc: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x00dd: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x00de: 0x00df, # LATIN SMALL LETTER SHARP S (German)
0x00df: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00e0: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00e1: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
0x00e2: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
0x00e3: 0x00d0, # LATIN CAPITAL LETTER ETH (Icelandic)
0x00e4: 0x00f0, # LATIN SMALL LETTER ETH (Icelandic)
0x00e5: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00e6: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
0x00e7: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e8: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
0x00e9: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00ea: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x00eb: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00ec: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00ed: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00ee: 0x0178, # LATIN CAPITAL LETTER Y WITH DIAERESIS
0x00ef: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x00f0: 0x00de, # LATIN CAPITAL LETTER THORN (Icelandic)
0x00f1: 0x00fe, # LATIN SMALL LETTER THORN (Icelandic)
0x00f2: 0x00b7, # MIDDLE DOT
0x00f3: 0x00b5, # MICRO SIGN
0x00f4: 0x00b6, # PILCROW SIGN
0x00f5: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00f6: 0x2014, # EM DASH
0x00f7: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00f8: 0x00bd, # VULGAR FRACTION ONE HALF
0x00f9: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00fa: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00fb: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00fc: 0x25a0, # BLACK SQUARE
0x00fd: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00fe: 0x00b1, # PLUS-MINUS SIGN
0x00ff: None,
})
### Encoding Map
encoding_map = codecs.make_encoding_map(decoding_map)
| gpl-2.0 |
cbingos/hongmafund | nvd3/stackedAreaChart.py | 5 | 3803 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Python-nvd3 is a Python wrapper for NVD3 graph library.
NVD3 is an attempt to build re-usable charts and chart components
for d3.js without taking away the power that d3.js gives you.
Project location : https://github.com/areski/python-nvd3
"""
from .NVD3Chart import NVD3Chart
class stackedAreaChart(NVD3Chart):
    """A stacked area chart.

    Identical to the area chart except that the series are stacked on top of
    one another instead of overlapping, which makes the totals much easier
    to read.

    Keyword arguments of interest (all optional):

    * ``height`` -- graph height in pixels (default 450)
    * ``width`` -- graph width in pixels
    * ``x_is_date`` -- treat x values as dates
    * ``x_axis_format`` / ``y_axis_format`` -- d3 tick formats

    Python example::

        from nvd3 import stackedAreaChart
        chart = stackedAreaChart(name='stackedAreaChart', height=400, width=400)
        xdata = [100, 101, 102, 103, 104, 105, 106,]
        extra_serie = {"tooltip": {"y_start": "There is ", "y_end": " min"}}
        chart.add_serie(name="Serie 1", y=[6, 11, 12, 7, 11, 10, 11],
                        x=xdata, extra=extra_serie)
        chart.add_serie(name="Serie 2", y=[8, 20, 16, 12, 20, 28, 28],
                        x=xdata, extra=extra_serie)
        chart.buildhtml()
    """

    def __init__(self, **kwargs):
        NVD3Chart.__init__(self, **kwargs)

        if kwargs.get('x_is_date', False):
            # Date x-axis: switch the chart into date mode and install the
            # custom tooltip so timestamps render readably.
            self.set_date_flag(True)
            self.create_x_axis(
                'xAxis',
                format=kwargs.get('x_axis_format', '%d %b %Y'),
                date=True,
            )
            self.set_custom_tooltip_flag(True)
        else:
            self.create_x_axis('xAxis',
                               format=kwargs.get('x_axis_format', '.2f'))
        self.create_y_axis('yAxis', format=kwargs.get('y_axis_format', '.2f'))

        # A height must always be set, otherwise successive charts are
        # drawn on top of each other.
        height = kwargs.get('height', 450)
        if height:
            self.set_graph_height(height)
        width = kwargs.get('width', None)
        if width:
            self.set_graph_width(width)
| mit |
antonyc/django_mongo_cache | src/tools_mongodb_cache/cache.py | 2 | 12742 | # coding: utf-8
from __future__ import unicode_literals
import base64
import logging
import cPickle as pickle
from datetime import timedelta
from django.utils import timezone
from pymongo import errors as pymongo_errors
from pymongo import uri_parser
from django.core.exceptions import ImproperlyConfigured
from django.core.cache.backends.base import BaseCache
from .mongodb import LoggingCollection, MongoDBWrapper, ensure_indexes
logger = logging.getLogger(__name__)
def serialize_base64(value):
    """Pickle *value* and return the pickle as a base64-encoded string."""
    pickled = pickle.dumps(value, protocol=pickle.HIGHEST_PROTOCOL)
    return base64.b64encode(pickled)
def deserialize_base64(serialized):
    """Inverse of serialize_base64: decode the base64 wrapper, then unpickle."""
    raw = base64.b64decode(serialized)
    return pickle.loads(raw)
class BadDataInCache(RuntimeError):
    """Raised when a cache entry exists but cannot be deserialized or used."""
    pass
# Names of collections known to already have a TTL index in mongo.
# Kept at module level so ensure_indexes() runs once per collection when the
# worker starts, instead of on every request.
_ensured_ttl_indexes = set()
UNKNOWN_MONGO_ERROR_MESSAGE = 'Unknown error from pymongo client: "%s"'
def convert_to_capped(cache, size_limit=500 * 1024 * 1024):
    """Convert a regular collection into a capped collection.

    @type cache: Collection
    @param cache: collection to convert
    @type size_limit: int
    @param size_limit: capped collection size limit in bytes. default = 500 MB
    @raise: OperationFailure
    """
    cache.database.command(
        "convertToCapped",
        cache.collection_name,
        size=size_limit,
        check=True,
    )
class MongoDBCache(BaseCache):
    """
    Stores Django caches in MongoDB.

    If you want to store values that are not JSON-serializable, add this to
    CACHES in the Django settings:
    'OPTIONS': {
        'VALUES_ARE_JSON_SERIALIZEABLE': False,
    }
    This hurts performance (values are pickled and base64-encoded).
    """
    # Lazily created pymongo collection wrapper (see collection()).
    _collection = None
    _database_name = None
    # Class attribute, shared by all instances!  Remembers that mongo was
    # dead and forgets about it every N seconds.
    mongodb_is_dead_since = {}
    # How long to wait before trying to talk to mongo again.
    _give_mongo_chance_every = 1  # seconds
    # Whether non-JSON-serializable values are allowed in this cache
    # (set from OPTIONS in __init__).
    json_serializeable_values = None
    @property
    def give_mongodb_chance(self):
        """
        Return True if it is OK to try talking to mongo again.
        """
        if not self.mongodb_is_dead_since:
            return True
        dead_since = self.mongodb_is_dead_since['dead_since']
        if dead_since + timedelta(seconds=self._give_mongo_chance_every) < timezone.now():
            # The back-off window expired: clear the flag and allow a retry.
            del self.mongodb_is_dead_since['dead_since']
            return True
        return False
    def mongodb_became_dead(self):
        """
        Mark mongo as completely dead.
        """
        self.mongodb_is_dead_since['dead_since'] = timezone.now()
    def __init__(self, location, params, mongodb=None):
        """
        @param location: mongodb:// connection URI; must name a database
        @param params: Django cache params; must contain 'collection'
        @type mongodb: an existing mongo connection instance (optional)
        """
        self.collection_name = params['collection']
        options = params.get('OPTIONS', {})
        self.write_concern = options.get('WRITE_CONCERN', 1)
        self.json_serializeable_values = options.get(
            'VALUES_ARE_JSON_SERIALIZEABLE', True)
        strategy = options.get('STRATEGY', 'NEAREST')
        assert self.write_concern > -1
        assert isinstance(self.collection_name, basestring)
        if not location.startswith('mongodb://'):
            raise ImproperlyConfigured('connection to mongo should start with mongodb://')
        database = uri_parser.parse_uri(location)['database']
        if not database:
            raise ImproperlyConfigured('Specify DB like that mongodb://hosts/database_name')
        self.mongodb = mongodb or MongoDBWrapper(
            hosts=location,
            strategy=strategy,
            replica_set=options['replica_set'],
            database_name=database
        )
        self.logger = logging.getLogger('mongo_requests')
        super(MongoDBCache, self).__init__(params)
        self._ensure_ttl_collection()
    def collection(self):
        """
        Return the (logging-wrapped) pymongo collection, creating it lazily.

        @rtype: Collection
        """
        if self._collection is None:
            self._collection = LoggingCollection(
                getattr(self.mongodb.database, self.collection_name), logger
            )
        return self._collection
    def set(self, key, value, timeout=None, version=None):
        """
        Store a value under a key.

        Returns False when mongo is unavailable or the write failed.
        @rtype: bool
        """
        assert isinstance(key, basestring)
        if not self.give_mongodb_chance:
            return False
        # A custom write timeout must not exceed the collection timeout;
        # fall back to the collection's default timeout in that case.
        if timeout and timeout > self.default_timeout:
            logger.warning('%s custom timeout can\'t be bigger than default %s',
                           self.collection_name, self.default_timeout)
            timeout = None
        if version is not None:
            key = self.make_key(key, version)
        try:
            self.write_in_mongo(
                key,
                value if self.json_serializeable_values else serialize_base64(value),
                timeout)
        except pymongo_errors.PyMongoError as e:
            # Connection-level failure: remember mongo is down for a while.
            self.logger.exception(UNKNOWN_MONGO_ERROR_MESSAGE, repr(e))
            self.mongodb_became_dead()
            return False
        except pymongo_errors.BSONError as e:
            # Serialization failure: mongo itself is fine, just this value.
            self.logger.exception(UNKNOWN_MONGO_ERROR_MESSAGE, repr(e))
            return False
        return True
    # add() has the same upsert semantics as set() for this backend.
    add = set
    def write_in_mongo(self, key, value, timeout=None):
        """
        Write a value into mongo (upsert keyed on "key").

        @param key: key the record is stored under; kept in the "key" field
        @param value: the value, which must be JSON-serializable (!) unless
            this cache base64-pickles its values
        @type timeout: int
        @param timeout: optional per-record timeout used instead of the
            collection's default timeout, in seconds.
        """
        # Avoid sending unicode through cPickle: it errors on high unicode
        # code points, e.g. \xfc.
        self.collection().update(
            {'key': key},
            {
                'key': key,
                'value': value,
                'status': timezone.now(),
                'record_timeout': timeout
            },
            True,
            w=self.write_concern,
        )
    def get(self, key, default=None, version=None):
        """
        Fetch an object from the cache.

        Raises BadDataInCache when the cached data cannot be used.
        """
        assert isinstance(key, basestring)
        if not self.give_mongodb_chance:
            return default
        if version is not None:
            key = self.make_key(key, version)
        result = None
        try:
            result = self.read_from_mongo(key)
        except pymongo_errors.PyMongoError as e:
            self.logger.exception(UNKNOWN_MONGO_ERROR_MESSAGE, repr(e))
            self.mongodb_became_dead()
        except pymongo_errors.BSONError as e:
            self.logger.exception(UNKNOWN_MONGO_ERROR_MESSAGE, repr(e))
        if result is None:
            return default
        if not isinstance(result, dict):
            raise BadDataInCache('Result is not a dict, but "{0}"'.format(
                type(result))
            )
        if result.get('record_timeout', None):
            rtimeout = int(result['record_timeout'])
            # A per-record timeout must not exceed the collection timeout;
            # clamp it to the default in that case.
            if rtimeout > self.default_timeout:
                rtimeout = self.default_timeout
            if timezone.now() > result['status'] + timedelta(seconds=rtimeout):
                # Record expired (mongo's TTL reaper has not removed it yet).
                return default
        result = result['value']
        try:
            if not self.json_serializeable_values:
                result = deserialize_base64(result)
        except:
            # NOTE(review): bare except deliberately catches any unpickling
            # failure and converts it into BadDataInCache.
            self.logger.exception('Bad data in cache')
            raise BadDataInCache('Failed to use data from cache')
        return result
    def read_from_mongo(self, key):
        # Raw lookup of the record document; returns None when absent.
        return self.collection().find_one(
            {'key': key}
        )
    def delete(self, key, version=None):
        self.collection().remove({
            'key': self.make_key(key, version=version),
        })
    def delete_many(self, keys, version=None):
        # Single round-trip removal of several keys via $or.
        self.collection().remove({
            '$or': [
                {'key': self.make_key(key, version=version)} for key in keys
            ],
        })
    def clear(self):
        self.collection().remove()
    def is_capped(self):
        """
        Return True if collection is capped
        @rtype: bool
        """
        return self.collection().options().get('capped', False)
    def _ensure_ttl_collection(self):
        """
        ensure collection has TTL index
        """
        # Only attempt once per collection per process (see module-level set).
        if self.collection_name not in _ensured_ttl_indexes:
            try:
                ensure_indexes(self.collection(), self.default_timeout)
            except pymongo_errors.PyMongoError as exc:
                logger.error(
                    'I failed to ensure indexes on "%s": "%s"',
                    self.collection_name, repr(exc)
                )
            else:
                _ensured_ttl_indexes.add(self.collection_name)
class MongoFailSafeSessionCache(MongoDBCache):
    """
    Talks to mongo while ignoring mongo errors (fail-safe session storage).
    """
    MONGODB_ERROR_MESSAGE = 'Sessions unavailable because of mongodb error'
    def add(self, key, value, timeout=None, version=None):
        """
        Create a session, but never create an empty one.

        Empty sessions are skipped to avoid a pointless storage round-trip.
        """
        if not value:
            self.logger.warning('Skipped creating empty session with value "%s"', repr(value))
            return True
        try:
            return super(MongoFailSafeSessionCache, self).add(key, value, timeout, version)
        except pymongo_errors.PyMongoError:
            # Pretend success: sessions must not break the request.
            self.logger.exception(self.MONGODB_ERROR_MESSAGE)
            return True
    def set(self, key, value, timeout=None, version=None):
        # Same fail-safe wrapper as add(): log and report success on error.
        try:
            return super(MongoFailSafeSessionCache, self).set(
                key, value, timeout, version
            )
        except pymongo_errors.PyMongoError:
            self.logger.exception(self.MONGODB_ERROR_MESSAGE)
            return True
    def delete(self, key, version=None):
        # Fail-safe delete: a lost delete is logged, not raised.
        try:
            return super(MongoFailSafeSessionCache, self).delete(
                key, version
            )
        except pymongo_errors.PyMongoError:
            self.logger.exception(self.MONGODB_ERROR_MESSAGE)
            return True
    def get(self, key, default=None, version=None):
        # Fail-safe get: fall back to *default* when mongo errors out.
        try:
            return super(MongoFailSafeSessionCache, self).get(
                key, default, version
            )
        except pymongo_errors.PyMongoError:
            self.logger.exception(self.MONGODB_ERROR_MESSAGE)
            return default
    def _ensure_ttl_collection(self):
        """
        Wrapper that suppresses errors when mongo is down.
        """
        try:
            super(MongoFailSafeSessionCache, self)._ensure_ttl_collection()
        except pymongo_errors.PyMongoError:
            self.logger.exception(self.MONGODB_ERROR_MESSAGE)
| mit |
mrquim/repository.mrquim | repo/plugin.video.salts/js2py/constructors/jsarray.py | 71 | 1280 | from ..base import *
@Js
def Array():
    """JS Array constructor.

    ``Array(a, b, ...)`` (zero or 2+ args) returns an array of the
    arguments; ``Array(n)`` with a numeric *n* returns an empty array whose
    length is *n*, raising RangeError when *n* is not a valid uint32 length.
    """
    if len(arguments) != 1:
        return arguments.to_list()
    first = arguments[0]
    if not isinstance(first, PyJsNumber):
        return [first]
    length = first.to_uint32()
    if length != first.value:
        raise MakeError('RangeError', 'Invalid array length')
    result = Js([])
    result.put('length', first)
    return result
# ``new Array(...)`` behaves exactly like calling Array(...).
Array.create = Array
# Array has a declared .length of 1, per the spec.
Array.own['length']['value'] = Js(1)
@Js
def isArray(arg):
    # ES5 Array.isArray: true only for objects whose [[Class]] is 'Array'.
    return arg.Class=='Array'
# Expose isArray and prototype as properties with the ES5 attribute sets
# (isArray writable/configurable; prototype fully locked down).
Array.define_own_property('isArray', {'value': isArray,
                                      'enumerable': False,
                                      'writable': True,
                                      'configurable': True})
Array.define_own_property('prototype', {'value': ArrayPrototype,
                                        'enumerable': False,
                                        'writable': False,
                                        'configurable': False})
ArrayPrototype.define_own_property('constructor', {'value': Array,
'enumerable': False,
'writable': True,
'configurable': True}) | gpl-2.0 |
polimediaupv/edx-platform | common/lib/xmodule/xmodule/video_module/video_xfields.py | 46 | 8327 | """
XFields for video module.
"""
import datetime
from xblock.fields import Scope, String, Float, Boolean, List, Dict, DateTime
from xmodule.fields import RelativeTime
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
class VideoFields(object):
"""Fields for `VideoModule` and `VideoDescriptor`."""
display_name = String(
help=_("The name students see. This name appears in the course ribbon and as a header for the video."),
display_name=_("Component Display Name"),
default="Video",
scope=Scope.settings
)
saved_video_position = RelativeTime(
help=_("Current position in the video."),
scope=Scope.user_state,
default=datetime.timedelta(seconds=0)
)
# TODO: This should be moved to Scope.content, but this will
# require data migration to support the old video module.
youtube_id_1_0 = String(
help=_("Optional, for older browsers: the YouTube ID for the normal speed video."),
display_name=_("YouTube ID"),
scope=Scope.settings,
default="3_yD_cEKoCk"
)
youtube_id_0_75 = String(
help=_("Optional, for older browsers: the YouTube ID for the .75x speed video."),
display_name=_("YouTube ID for .75x speed"),
scope=Scope.settings,
default=""
)
youtube_id_1_25 = String(
help=_("Optional, for older browsers: the YouTube ID for the 1.25x speed video."),
display_name=_("YouTube ID for 1.25x speed"),
scope=Scope.settings,
default=""
)
youtube_id_1_5 = String(
help=_("Optional, for older browsers: the YouTube ID for the 1.5x speed video."),
display_name=_("YouTube ID for 1.5x speed"),
scope=Scope.settings,
default=""
)
start_time = RelativeTime( # datetime.timedelta object
help=_(
"Time you want the video to start if you don't want the entire video to play. "
"Not supported in the native mobile app: the full video file will play. "
"Formatted as HH:MM:SS. The maximum value is 23:59:59."
),
display_name=_("Video Start Time"),
scope=Scope.settings,
default=datetime.timedelta(seconds=0)
)
end_time = RelativeTime( # datetime.timedelta object
help=_(
"Time you want the video to stop if you don't want the entire video to play. "
"Not supported in the native mobile app: the full video file will play. "
"Formatted as HH:MM:SS. The maximum value is 23:59:59."
),
display_name=_("Video Stop Time"),
scope=Scope.settings,
default=datetime.timedelta(seconds=0)
)
#front-end code of video player checks logical validity of (start_time, end_time) pair.
# `source` is deprecated field and should not be used in future.
# `download_video` is used instead.
source = String(
help=_("The external URL to download the video."),
display_name=_("Download Video"),
scope=Scope.settings,
default=""
)
download_video = Boolean(
help=_("Allow students to download versions of this video in different formats if they cannot use the edX video player or do not have access to YouTube. You must add at least one non-YouTube URL in the Video File URLs field."),
display_name=_("Video Download Allowed"),
scope=Scope.settings,
default=False
)
html5_sources = List(
help=_("The URL or URLs where you've posted non-YouTube versions of the video. Each URL must end in .mpeg, .mp4, .ogg, or .webm and cannot be a YouTube URL. (For browser compatibility, we strongly recommend .mp4 and .webm format.) Students will be able to view the first listed video that's compatible with the student's computer. To allow students to download these videos, set Video Download Allowed to True."),
display_name=_("Video File URLs"),
scope=Scope.settings,
)
track = String(
help=_("By default, students can download an .srt or .txt transcript when you set Download Transcript Allowed to True. If you want to provide a downloadable transcript in a different format, we recommend that you upload a handout by using the Upload a Handout field. If this isn't possible, you can post a transcript file on the Files & Uploads page or on the Internet, and then add the URL for the transcript here. Students see a link to download that transcript below the video."),
display_name=_("Downloadable Transcript URL"),
scope=Scope.settings,
default=''
)
download_track = Boolean(
help=_("Allow students to download the timed transcript. A link to download the file appears below the video. By default, the transcript is an .srt or .txt file. If you want to provide the transcript for download in a different format, upload a file by using the Upload Handout field."),
display_name=_("Download Transcript Allowed"),
scope=Scope.settings,
default=False
)
sub = String(
help=_("The default transcript for the video, from the Default Timed Transcript field on the Basic tab. This transcript should be in English. You don't have to change this setting."),
display_name=_("Default Timed Transcript"),
scope=Scope.settings,
default=""
)
show_captions = Boolean(
help=_("Specify whether the transcripts appear with the video by default."),
display_name=_("Show Transcript"),
scope=Scope.settings,
default=True
)
# Data format: {'de': 'german_translation', 'uk': 'ukrainian_translation'}
transcripts = Dict(
help=_("Add transcripts in different languages. Click below to specify a language and upload an .srt transcript file for that language."),
display_name=_("Transcript Languages"),
scope=Scope.settings,
default={}
)
transcript_language = String(
help=_("Preferred language for transcript."),
display_name=_("Preferred language for transcript"),
scope=Scope.preferences,
default="en"
)
transcript_download_format = String(
help=_("Transcript file format to download by user."),
scope=Scope.preferences,
values=[
# Translators: This is a type of file used for captioning in the video player.
{"display_name": _("SubRip (.srt) file"), "value": "srt"},
{"display_name": _("Text (.txt) file"), "value": "txt"}
],
default='srt',
)
speed = Float(
help=_("The last speed that the user specified for the video."),
scope=Scope.user_state
)
global_speed = Float(
help=_("The default speed for the video."),
scope=Scope.preferences,
default=1.0
)
youtube_is_available = Boolean(
help=_("Specify whether YouTube is available for the user."),
scope=Scope.user_info,
default=True
)
handout = String(
help=_("Upload a handout to accompany this video. Students can download the handout by clicking Download Handout under the video."),
display_name=_("Upload Handout"),
scope=Scope.settings,
)
only_on_web = Boolean(
help=_(
"Specify whether access to this video is limited to browsers only, or if it can be "
"accessed from other applications including mobile apps."
),
display_name="Video Available on Web Only",
scope=Scope.settings,
default=False
)
edx_video_id = String(
help=_("If you were assigned a Video ID by edX for the video to play in this component, enter the ID here. In this case, do not enter values in the Default Video URL, the Video File URLs, and the YouTube ID fields. If you were not assigned a Video ID, enter values in those other fields and ignore this field."), # pylint: disable=line-too-long
display_name=_("Video ID"),
scope=Scope.settings,
default="",
)
bumper_last_view_date = DateTime(
display_name=_("Date of the last view of the bumper"),
scope=Scope.preferences,
)
bumper_do_not_show_again = Boolean(
display_name=_("Do not show bumper again"),
scope=Scope.preferences,
default=False,
)
| agpl-3.0 |
ChinaMassClouds/copenstack-server | openstack/src/nova-2014.2/nova/keymgr/single_key_mgr.py | 18 | 2526 | # Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
An implementation of a key manager that returns a single key in response to
all invocations of get_key.
"""
from nova import exception
from nova.i18n import _
from nova.keymgr import mock_key_mgr
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class SingleKeyManager(mock_key_mgr.MockKeyManager):
    """This key manager implementation supports all the methods specified by
    the key manager interface. This implementation creates a single key in
    response to all invocations of create_key. Side effects
    (e.g., raising exceptions) for each method are handled as specified by
    the key manager interface.
    """
    def __init__(self):
        LOG.warn(_('This key manager is insecure and is not recommended for '
                   'production deployments'))
        super(SingleKeyManager, self).__init__()
        self.key_id = '00000000-0000-0000-0000-000000000000'
        self.key = self._generate_key(key_length=256)
        # key should exist by default
        self.keys[self.key_id] = self.key
    def _generate_hex_key(self, **kwargs):
        """Return the fixed all-zero hex key of the requested bit length."""
        key_length = kwargs.get('key_length', 256)
        # Use floor division explicitly: key_length is a bit count and one
        # hex digit encodes 4 bits ('/' would yield a float on Python 3 and
        # make the string multiplication below raise TypeError).
        return '0' * (key_length // 4)  # hex digit => 4 bits
    def _generate_key_id(self):
        # Every created key reports the same well-known ID.
        return self.key_id
    def store_key(self, ctxt, key, **kwargs):
        """Store the key; only the single fixed key may be stored."""
        if key != self.key:
            raise exception.KeyManagerError(
                reason="cannot store arbitrary keys")
        return super(SingleKeyManager, self).store_key(ctxt, key, **kwargs)
    def delete_key(self, ctxt, key_id, **kwargs):
        """Validate the request but never actually delete the single key."""
        if ctxt is None:
            raise exception.Forbidden()
        if key_id != self.key_id:
            raise exception.KeyManagerError(
                reason="cannot delete non-existent key")
        # Deleting the only key would break all users sharing it.
        LOG.warn(_("Not deleting key %s"), key_id)
| gpl-2.0 |
alistairlow/tensorflow | tensorflow/contrib/boosted_trees/estimator_batch/custom_export_strategy.py | 7 | 10990 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Strategy to export custom proto formats."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from tensorflow.contrib.boosted_trees.proto import tree_config_pb2
from tensorflow.contrib.boosted_trees.python.training.functions import gbdt_batch
from tensorflow.contrib.decision_trees.proto import generic_tree_model_extensions_pb2
from tensorflow.contrib.decision_trees.proto import generic_tree_model_pb2
from tensorflow.contrib.learn.python.learn import export_strategy
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.platform import gfile
from tensorflow.python.saved_model import loader as saved_model_loader
from tensorflow.python.saved_model import tag_constants
def make_custom_export_strategy(name,
                                convert_fn,
                                feature_columns,
                                export_input_fn):
  """Makes custom exporter of GTFlow tree format.

  Args:
    name: A string, for the name of the export strategy.
    convert_fn: A function that converts the tree proto to desired format and
      saves it to the desired location. Can be None to skip conversion.
    feature_columns: A list of feature columns.
    export_input_fn: A function that takes no arguments and returns an
      `InputFnOps`.

  Returns:
    An `ExportStrategy`.
  """
  base_strategy = saved_model_export_utils.make_export_strategy(
      serving_input_fn=export_input_fn)
  input_fn = export_input_fn()
  # Recover the feature-name ordering and the per-kind column counts that the
  # ensemble was trained with; split feature indices in the proto are offsets
  # into this ordering (dense first, then sparse float, then sparse int).
  (sorted_feature_names, dense_floats, sparse_float_indices, _, _,
   sparse_int_indices, _, _) = gbdt_batch.extract_features(
       input_fn.features, feature_columns)
  def export_fn(estimator, export_dir, checkpoint_path=None, eval_result=None):
    """A wrapper to export to SavedModel, and convert it to other formats."""
    result_dir = base_strategy.export(estimator, export_dir,
                                      checkpoint_path,
                                      eval_result)
    # Reload the just-exported SavedModel in a fresh graph so the serialized
    # tree ensemble proto can be pulled out of the serving graph.
    with ops.Graph().as_default() as graph:
      with tf_session.Session(graph=graph) as sess:
        saved_model_loader.load(
            sess, [tag_constants.SERVING], result_dir)
        # Note: This is GTFlow internal API and might change.
        ensemble_model = graph.get_operation_by_name(
            "ensemble_model/TreeEnsembleSerialize")
        _, dfec_str = sess.run(ensemble_model.outputs)
        dtec = tree_config_pb2.DecisionTreeEnsembleConfig()
        dtec.ParseFromString(dfec_str)
        # Export the result in the same folder as the saved model.
        if convert_fn:
          convert_fn(dtec, sorted_feature_names,
                     len(dense_floats),
                     len(sparse_float_indices),
                     len(sparse_int_indices), result_dir, eval_result)
        # Always write per-column gain-based feature importances, sorted
        # descending, into the SavedModel's assets.extra directory.
        feature_importances = _get_feature_importances(
            dtec, sorted_feature_names,
            len(dense_floats),
            len(sparse_float_indices), len(sparse_int_indices))
        sorted_by_importance = sorted(
            feature_importances.items(), key=lambda x: -x[1])
        assets_dir = os.path.join(result_dir, "assets.extra")
        gfile.MakeDirs(assets_dir)
        with gfile.GFile(os.path.join(assets_dir, "feature_importances"),
                         "w") as f:
          f.write("\n".join("%s, %f" % (k, v) for k, v in sorted_by_importance))
    return result_dir
  return export_strategy.ExportStrategy(name, export_fn)
def convert_to_universal_format(dtec, sorted_feature_names,
                                num_dense, num_sparse_float,
                                num_sparse_int,
                                feature_name_to_proto=None):
  """Convert GTFlow trees to universal format.

  Args:
    dtec: A DecisionTreeEnsembleConfig proto describing the trained ensemble.
    sorted_feature_names: Feature names in the ensemble's column order; dense
      float columns come first, then sparse float, then sparse int columns.
    num_dense: Number of dense float feature columns.
    num_sparse_float: Number of sparse float feature columns.
    num_sparse_int: Number of sparse int feature columns (unused).
    feature_name_to_proto: Optional dict mapping a feature name to its
      feature-description proto; when absent, empty descriptions are emitted.

  Returns:
    A generic_tree_model_pb2.ModelAndFeatures proto, with each tree's weight
    folded into its leaf values.

  Raises:
    ValueError: If a tree contains an unsupported node type.
  """
  del num_sparse_int  # unused.
  model_and_features = generic_tree_model_pb2.ModelAndFeatures()
  # TODO(jonasz): Feature descriptions should contain information about how each
  # feature is processed before it's fed to the model (e.g. bucketing
  # information). As of now, this serves as a list of features the model uses.
  for feature_name in sorted_feature_names:
    if not feature_name_to_proto:
      model_and_features.features[feature_name].SetInParent()
    else:
      model_and_features.features[feature_name].CopyFrom(
          feature_name_to_proto[feature_name])
  model = model_and_features.model
  model.ensemble.summation_combination_technique.SetInParent()
  for tree_idx in range(len(dtec.trees)):
    gtflow_tree = dtec.trees[tree_idx]
    tree_weight = dtec.tree_weights[tree_idx]
    member = model.ensemble.members.add()
    member.submodel_id.value = tree_idx
    tree = member.submodel.decision_tree
    for node_idx in range(len(gtflow_tree.nodes)):
      gtflow_node = gtflow_tree.nodes[node_idx]
      node = tree.nodes.add()
      node_type = gtflow_node.WhichOneof("node")
      node.node_id.value = node_idx
      if node_type == "leaf":
        leaf = gtflow_node.leaf
        if leaf.HasField("vector"):
          for weight in leaf.vector.value:
            new_value = node.leaf.vector.value.add()
            # Fold the per-tree weight into the leaf value so the universal
            # ensemble can use plain summation.
            new_value.float_value = weight * tree_weight
        else:
          for index, weight in zip(
              leaf.sparse_vector.index, leaf.sparse_vector.value):
            new_value = node.leaf.sparse_vector.sparse_value[index]
            new_value.float_value = weight * tree_weight
      else:
        node = node.binary_node
        # Binary nodes here.
        if node_type == "dense_float_binary_split":
          split = gtflow_node.dense_float_binary_split
          feature_id = split.feature_column
          inequality_test = node.inequality_left_child_test
          inequality_test.feature_id.id.value = sorted_feature_names[feature_id]
          inequality_test.type = (
              generic_tree_model_pb2.InequalityTest.LESS_OR_EQUAL)
          inequality_test.threshold.float_value = split.threshold
        elif node_type == "sparse_float_binary_split_default_left":
          split = gtflow_node.sparse_float_binary_split_default_left.split
          node.default_direction = (
              generic_tree_model_pb2.BinaryNode.LEFT)
          # TODO(nponomareva): adjust this id assignment when we allow multi-
          # column sparse tensors.
          feature_id = split.feature_column + num_dense
          inequality_test = node.inequality_left_child_test
          inequality_test.feature_id.id.value = sorted_feature_names[feature_id]
          inequality_test.type = (
              generic_tree_model_pb2.InequalityTest.LESS_OR_EQUAL)
          inequality_test.threshold.float_value = split.threshold
        elif node_type == "sparse_float_binary_split_default_right":
          split = gtflow_node.sparse_float_binary_split_default_right.split
          node.default_direction = (
              generic_tree_model_pb2.BinaryNode.RIGHT)
          # TODO(nponomareva): adjust this id assignment when we allow multi-
          # column sparse tensors.
          feature_id = split.feature_column + num_dense
          inequality_test = node.inequality_left_child_test
          inequality_test.feature_id.id.value = sorted_feature_names[feature_id]
          inequality_test.type = (
              generic_tree_model_pb2.InequalityTest.LESS_OR_EQUAL)
          inequality_test.threshold.float_value = split.threshold
        elif node_type == "categorical_id_binary_split":
          split = gtflow_node.categorical_id_binary_split
          node.default_direction = generic_tree_model_pb2.BinaryNode.RIGHT
          feature_id = split.feature_column + num_dense + num_sparse_float
          categorical_test = (
              generic_tree_model_extensions_pb2.MatchingValuesTest())
          categorical_test.feature_id.id.value = sorted_feature_names[
              feature_id]
          matching_id = categorical_test.value.add()
          matching_id.int64_value = split.feature_id
          node.custom_left_child_test.Pack(categorical_test)
        else:
          # Interpolate the node type into the message. Passing it as a
          # second ValueError argument (as before) left the format string
          # unformatted and carried the args as a tuple.
          raise ValueError("Unexpected node type %s" % node_type)
        node.left_child_id.value = split.left_id
        node.right_child_id.value = split.right_id
  return model_and_features
def _get_feature_importances(dtec, feature_names, num_dense_floats,
num_sparse_float, num_sparse_int):
"""Export the feature importance per feature column."""
del num_sparse_int # Unused.
sums = collections.defaultdict(lambda: 0)
for tree_idx in range(len(dtec.trees)):
tree = dtec.trees[tree_idx]
for tree_node in tree.nodes:
node_type = tree_node.WhichOneof("node")
if node_type == "dense_float_binary_split":
split = tree_node.dense_float_binary_split
split_column = feature_names[split.feature_column]
elif node_type == "sparse_float_binary_split_default_left":
split = tree_node.sparse_float_binary_split_default_left.split
split_column = feature_names[split.feature_column + num_dense_floats]
elif node_type == "sparse_float_binary_split_default_right":
split = tree_node.sparse_float_binary_split_default_right.split
split_column = feature_names[split.feature_column + num_dense_floats]
elif node_type == "categorical_id_binary_split":
split = tree_node.categorical_id_binary_split
split_column = feature_names[split.feature_column + num_dense_floats +
num_sparse_float]
elif node_type == "categorical_id_set_membership_binary_split":
split = tree_node.categorical_id_set_membership_binary_split
split_column = feature_names[split.feature_column + num_dense_floats +
num_sparse_float]
elif node_type == "leaf":
assert tree_node.node_metadata.gain == 0
continue
else:
raise ValueError("Unexpected split type %s", node_type)
# Apply shrinkage factor. It is important since it is not always uniform
# across different trees.
sums[split_column] += (
tree_node.node_metadata.gain * dtec.tree_weights[tree_idx])
return dict(sums)
| apache-2.0 |
mbohlool/client-python | kubernetes/client/models/v2beta1_horizontal_pod_autoscaler_condition.py | 1 | 7109 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V2beta1HorizontalPodAutoscalerCondition(object):
    """Swagger model: one status condition of a HorizontalPodAutoscaler.

    NOTE: mirrors the swagger-codegen generated model; the public attribute
    set, metadata maps and validation behavior are kept intact.
    """

    # Attribute name -> declared swagger type.
    swagger_types = {
        'last_transition_time': 'datetime',
        'message': 'str',
        'reason': 'str',
        'status': 'str',
        'type': 'str'
    }

    # Attribute name -> JSON key used on the wire.
    attribute_map = {
        'last_transition_time': 'lastTransitionTime',
        'message': 'message',
        'reason': 'reason',
        'status': 'status',
        'type': 'type'
    }

    def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None):
        """Build a condition; `status` and `type` are required (non-None)."""
        self._last_transition_time = None
        self._message = None
        self._reason = None
        self._status = None
        self._type = None
        self.discriminator = None
        # Optional fields are assigned only when supplied, so absent values
        # never hit the property setters.
        if last_transition_time is not None:
            self.last_transition_time = last_transition_time
        if message is not None:
            self.message = message
        if reason is not None:
            self.reason = reason
        self.status = status
        self.type = type

    @property
    def last_transition_time(self):
        """datetime: last time the condition transitioned between statuses."""
        return self._last_transition_time

    @last_transition_time.setter
    def last_transition_time(self, last_transition_time):
        """Set the last transition time of this condition."""
        self._last_transition_time = last_transition_time

    @property
    def message(self):
        """str: human-readable details about the transition."""
        return self._message

    @message.setter
    def message(self, message):
        """Set the human-readable message of this condition."""
        self._message = message

    @property
    def reason(self):
        """str: the reason for the condition's last transition."""
        return self._reason

    @reason.setter
    def reason(self, reason):
        """Set the reason for the condition's last transition."""
        self._reason = reason

    @property
    def status(self):
        """str: status of the condition (True, False, Unknown)."""
        return self._status

    @status.setter
    def status(self, status):
        """Set the status of this condition; must not be None."""
        if status is None:
            raise ValueError("Invalid value for `status`, must not be `None`")
        self._status = status

    @property
    def type(self):
        """str: describes the current condition."""
        return self._type

    @type.setter
    def type(self, type):
        """Set the type of this condition; must not be None."""
        if type is None:
            raise ValueError("Invalid value for `type`, must not be `None`")
        self._type = type

    def to_dict(self):
        """Recursively serialize the model's properties into a plain dict."""
        def _convert(value):
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return dict(
                    (key, val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items())
            return value

        return dict((attr, _convert(getattr(self, attr)))
                    for attr in self.swagger_types)

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two conditions are equal when all their attributes match."""
        if not isinstance(other, V2beta1HorizontalPodAutoscalerCondition):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| apache-2.0 |
phonelab/android_kernel | tools/perf/util/setup.py | 4998 | 1330 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    # Redirect the extension build output into the directories that perf's
    # Makefile passes in via environment variables (see build_lib/build_tmp
    # below), instead of distutils' default build/ tree.
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    # Install straight from the Makefile-provided build directory so the
    # "build" and "install" steps agree on where the artifacts live.
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib
# Base compiler flags, extended with whatever CFLAGS the caller exported.
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()

# Build/temp directories handed in by perf's Makefile.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')

# Read the list of C sources for the extension, skipping blank lines and
# '#' comments.  Use open() in a with-block rather than the removed-in-Py3
# file() builtin, so the handle is also closed deterministically instead of
# leaking until interpreter exit.
with open('util/python-ext-sources') as sources:
    ext_sources = [f.strip() for f in sources
                   if len(f.strip()) > 0 and f[0] != '#']

perf = Extension('perf',
		  sources = ext_sources,
		  include_dirs = ['util/include'],
		  extra_compile_args = cflags,
                 )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
tinchoss/Python_Android | python/src/Mac/Demo/PICTbrowse/oldPICTbrowse.py | 34 | 4689 | """browsepict - Display all "PICT" resources found"""
import FrameWork
import EasyDialogs
from Carbon import Res
from Carbon import Qd
from Carbon import Win
from Carbon import List
import struct
import macresource
#
# Resource definitions
ID_MAIN=512     # DLOG resource id of the main dialog
MAIN_LIST=1     # dialog item number: list of PICT resources
MAIN_SHOW=2     # dialog item number: "Show" button
# Where is the picture window?
LEFT=200
TOP=64
def main():
    # Make sure the dialog resources are loadable, then hand control to the
    # application object (its __init__ runs the event loop until quit).
    macresource.need('DLOG', ID_MAIN, "oldPICTbrowse.rsrc")
    PICTbrowse()
class PICTbrowse(FrameWork.Application):
    """Application presenting a dialog that lists all PICT resources."""

    def __init__(self):
        # Menus and application state must exist before any window is made.
        FrameWork.Application.__init__(self)
        # Create the main dialog, populate it with every available PICT
        # resource, then enter the event loop.
        self.main_dialog = MyDialog(self)
        self.main_dialog.open(ID_MAIN, self.findPICTresources())
        self.mainloop()

    def makeusermenus(self):
        self.filemenu = filemenu = FrameWork.Menu(self.menubar, "File")
        self.quititem = FrameWork.MenuItem(filemenu, "Quit", "Q", self.quit)

    def quit(self, *args):
        self._quit()

    def showPICT(self, resid):
        window = PICTwindow(self)
        window.open(resid)

    def findPICTresources(self):
        """Return an (id, name) pair for every PICT resource."""
        found = []
        for index in range(1, Res.CountResources('PICT') + 1):
            # Inspect the resource without loading its (possibly large) data.
            Res.SetResLoad(0)
            try:
                resource = Res.GetIndResource('PICT', index)
            finally:
                Res.SetResLoad(1)
            resid, _, name = resource.GetResInfo()
            found.append((resid, name))
        return found
class PICTwindow(FrameWork.Window):
def open(self, (resid, resname)):
if not resname:
resname = '#%r' % (resid,)
self.resid = resid
picture = Qd.GetPicture(self.resid)
# Get rect for picture
print repr(picture.data[:16])
sz, t, l, b, r = struct.unpack('hhhhh', picture.data[:10])
print 'pict:', t, l, b, r
width = r-l
height = b-t
if width < 64: width = 64
elif width > 480: width = 480
if height < 64: height = 64
elif height > 320: height = 320
bounds = (LEFT, TOP, LEFT+width, TOP+height)
print 'bounds:', bounds
self.wid = Win.NewWindow(bounds, resname, 1, 0, -1, 1, 0)
self.wid.SetWindowPic(picture)
self.do_postopen()
class MyDialog(FrameWork.DialogWindow):
    "Main dialog window for PICTbrowse"

    def open(self, id, contents):
        # contents is a list of (resource-id, resource-name) pairs to show.
        self.id = id
        FrameWork.DialogWindow.open(self, ID_MAIN)
        self.dlg.SetDialogDefaultItem(MAIN_SHOW)
        tp, h, rect = self.dlg.GetDialogItem(MAIN_LIST)
        rect2 = rect[0]+1, rect[1]+1, rect[2]-17, rect[3]-17 # Scroll bar space
        # One-column list; rows are added in setlist().
        self.list = List.LNew(rect2, (0, 0, 1, len(contents)), (0,0), 0, self.wid,
                              0, 1, 1, 1)
        self.contents = contents
        self.setlist()

    def setlist(self):
        """Repopulate the list control from self.contents."""
        self.list.LDelRow(0, 0)
        # Disable drawing while filling to avoid per-cell redraws.
        self.list.LSetDrawingMode(0)
        if self.contents:
            self.list.LAddRow(len(self.contents), 0)
            for i in range(len(self.contents)):
                v = repr(self.contents[i][0])
                if self.contents[i][1]:
                    v = v + '"' + self.contents[i][1] + '"'
                self.list.LSetCell(v, (0, i))
        self.list.LSetDrawingMode(1)
        self.list.LUpdate(self.wid.GetWindowPort().visRgn)

    def do_listhit(self, event):
        # Forward a click in the list control; LClick returns true on a
        # double-click, which opens the selected pictures.
        (what, message, when, where, modifiers) = event
        Qd.SetPort(self.wid)
        where = Qd.GlobalToLocal(where)
        print 'LISTHIT', where
        if self.list.LClick(where, modifiers):
            self.do_show()

    def getselection(self):
        """Return the (id, name) pairs for all currently selected rows."""
        items = []
        point = (0,0)
        while 1:
            ok, point = self.list.LGetSelect(1, point)
            if not ok:
                break
            items.append(point[1])
            # Advance past the found row so LGetSelect finds the next one.
            point = point[0], point[1]+1
        values = []
        for i in items:
            values.append(self.contents[i])
        return values

    def do_show(self, *args):
        # Open one picture window per selected resource.
        selection = self.getselection()
        for resid in selection:
            self.parent.showPICT(resid)

    def do_rawupdate(self, window, event):
        # Redraw the list frame and contents on update events.
        tp, h, rect = self.dlg.GetDialogItem(MAIN_LIST)
        Qd.SetPort(self.wid)
        Qd.FrameRect(rect)
        self.list.LUpdate(self.wid.GetWindowPort().visRgn)

    def do_activate(self, activate, event):
        self.list.LActivate(activate)

    def do_close(self):
        self.close()

    def do_itemhit(self, item, event):
        # Dispatch dialog item hits to the list or the Show button.
        if item == MAIN_LIST:
            self.do_listhit(event)
        if item == MAIN_SHOW:
            self.do_show()
# Script-style module: launch the browser immediately when run/imported.
main()
| apache-2.0 |
code-google-com/rad2py | ide2py/simplejsonrpc.py | 8 | 4860 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"Pythonic simple JSON RPC Client implementation"
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2011 Mariano Reingart"
__license__ = "LGPL 3.0"
__version__ = "0.05"
import urllib
from xmlrpclib import Transport, SafeTransport
from cStringIO import StringIO
import random
import sys
try:
import gluon.contrib.simplejson as json # try web2py json serializer
except ImportError:
try:
import json # try stdlib (py2.6)
except:
import simplejson as json # try external module
class JSONRPCError(RuntimeError):
    """Error object for remote procedure call fail.

    Attributes mirror the JSON-RPC error object: numeric `code`, short
    `message`, and optional `data` (a list of detail strings).
    """
    def __init__(self, code, message, data=None):
        # Guard against the default data=None: ''.join(None) would raise
        # TypeError while constructing the error itself.
        value = "%s: %s\n%s" % (code, message, '\n'.join(data or ()))
        RuntimeError.__init__(self, value)
        self.code = code
        self.message = message
        self.data = data
class JSONDummyParser:
    """Minimal object satisfying xmlrpclib's parser interface.

    Instead of parsing, it simply accumulates the raw response body so
    close() can hand the full text back to the transport.
    """
    def __init__(self):
        self._chunks = []

    def feed(self, data):
        self._chunks.append(data)

    def close(self):
        return "".join(self._chunks)
class JSONTransportMixin:
    "json wrapper for xmlrpclib transport interfase"

    def send_content(self, connection, request_body):
        # Same wire sequence as xmlrpclib's stock transports, but advertising
        # a JSON content type instead of text/xml.
        connection.putheader("Content-Type", "application/json")
        connection.putheader("Content-Length", str(len(request_body)))
        connection.endheaders()
        if request_body:
            connection.send(request_body)
        # todo: add gzip compression

    def getparser(self):
        # get parser and unmarshaller
        # The dummy parser just buffers the body; it is returned twice because
        # xmlrpclib expects a (parser, unmarshaller) pair.
        parser = JSONDummyParser()
        return parser, parser
class JSONTransport(JSONTransportMixin, Transport):
    # Plain-HTTP transport speaking JSON instead of XML.
    pass
class JSONSafeTransport(JSONTransportMixin, SafeTransport):
    # HTTPS transport speaking JSON instead of XML.
    pass
class ServerProxy(object):
    "JSON RPC Simple Client Service Proxy"

    # NOTE: Python 2 only (urllib.splittype/splithost, sys.maxint and the
    # `raise Exc, msg` statement syntax are all gone in Python 3).

    def __init__(self, uri, transport=None, encoding=None, verbose=0):
        self.location = uri             # server location (url)
        self.trace = verbose            # show debug messages
        self.exceptions = True          # raise errors? (JSONRPCError)
        self.timeout = None
        self.json_request = self.json_response = ''
        type, uri = urllib.splittype(uri)
        if type not in ("http", "https"):
            raise IOError, "unsupported JSON-RPC protocol"
        self.__host, self.__handler = urllib.splithost(uri)
        # Pick the transport matching the scheme unless one was injected.
        if transport is None:
            if type == "https":
                transport = JSONSafeTransport()
            else:
                transport = JSONTransport()
        self.__transport = transport
        self.__encoding = encoding
        self.__verbose = verbose

    def __getattr__(self, attr):
        "pseudo method that can be called"
        # Any unknown attribute becomes a remote method:
        # proxy.foo(1, 2) -> proxy.call("foo", 1, 2)
        return lambda *args: self.call(attr, *args)

    def call(self, method, *args):
        "JSON RPC communication (method invocation)"
        # build data sent to the service
        # Random request id lets the response be matched to this call.
        request_id = random.randint(0, sys.maxint)
        data = {'id': request_id, 'method': method, 'params': args, }
        request = json.dumps(data)
        # make HTTP request (retry if connection is lost)
        response = self.__transport.request(
            self.__host,
            self.__handler,
            request,
            verbose=self.__verbose
        )
        # store plain request and response for further debugging
        self.json_request = request
        self.json_response = response
        # parse json data coming from service
        # {'version': '1.1', 'id': id, 'result': result, 'error': None}
        response = json.loads(response)
        if response['id'] != request_id:
            raise JSONRPCError(0, "JSON Request ID != Response ID")
        # Keep the raw error dict around even when exceptions are disabled.
        self.error = response.get('error', {})
        if self.error and self.exceptions:
            # NOTE(review): error 'data' may be absent/None here; verify that
            # JSONRPCError tolerates a None data argument.
            raise JSONRPCError(self.error.get('code', 0),
                               self.error.get('message', ''),
                               self.error.get('data', None))
        return response.get('result')
# Backwards-compatible alias: earlier releases exposed the name ServiceProxy.
ServiceProxy = ServerProxy

if __name__ == "__main__":
    # basic tests:
    # Smoke test against a public demo endpoint (requires network access).
    location = "http://www.web2py.com.ar/webservices/sample/call/jsonrpc"
    client = ServerProxy(location, verbose='--verbose' in sys.argv,)
    print client.add(1, 2)
| gpl-3.0 |
davehunt/kuma | vendor/packages/translate/lang/test_nqo.py | 26 | 1743 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from translate.lang import factory
def test_punctranslate():
    """Tests that we can translate punctuation."""
    language = factory.getlanguage('nqo')
    # Empty and punctuation-free strings pass through unchanged.
    assert language.punctranslate(u"") == u""
    assert language.punctranslate(u"abc efg") == u"abc efg"
    # Full stop is kept as-is in N'Ko.
    assert language.punctranslate(u"abc efg.") == u"abc efg."
    # Exclamation, comma, semicolon and question marks map to their
    # N'Ko/Arabic-script counterparts.
    assert language.punctranslate(u"abc efg!") == u"abc efg߹"
    assert language.punctranslate(u"abc, efg; d?") == u"abc߸ efg؛ d؟"
    # See https://github.com/translate/translate/issues/1819
    # N'Ko is right-to-left, so opening/closing quote characters swap.
    assert language.punctranslate(u"It is called “abc”") == u"It is called ”abc“"
def test_sentences():
    """Tests basic functionality of sentence segmentation."""
    language = factory.getlanguage('nqo')
    sentences = language.sentences(u"")
    assert sentences == []
    # this text probably does not make sense, I just copied it from Firefox
    # translation and added some punctuation marks
    # Two sentences separated by a full stop should split in two.
    sentences = language.sentences(u"ߡߍ߲ ߠߎ߬ ߦߋ߫ ߓߊ߯ߙߊ߫ ߟߊ߫ ߢߐ߲߮ ߝߍ߬ ߞߊ߬ ߓߟߐߟߐ ߟߊߞߊ߬ߣߍ߲ ߕߏ߫. ߖߊ߬ߡߊ ߣߌ߫ ߓߍ߯ ߛߊ߬ߥߏ ߘߐ߫.")
    print(sentences)
    assert sentences == [u"ߡߍ߲ ߠߎ߬ ߦߋ߫ ߓߊ߯ߙߊ߫ ߟߊ߫ ߢߐ߲߮ ߝߍ߬ ߞߊ߬ ߓߟߐߟߐ ߟߊߞߊ߬ߣߍ߲ ߕߏ߫.", u"ߖߊ߬ߡߊ ߣߌ߫ ߓߍ߯ ߛߊ߬ߥߏ ߘߐ߫."]
    # The Arabic question mark must also act as a sentence terminator.
    sentences = language.sentences(u"ߡߍ߲ ߠߎ߬ ߦߋ߫ ߓߊ߯ߙߊ߫ ߟߊ߫ ߢߐ߲߮ ߝߍ߬ ߞߊ߬ ߓߟߐߟߐ ߟߊߞߊ߬ߣߍ߲ ߕߏ߫? ߖߊ߬ߡߊ ߣߌ߫ ߓߍ߯ ߛߊ߬ߥߏ ߘߐ߫.")
    print(sentences)
    assert sentences == [u"ߡߍ߲ ߠߎ߬ ߦߋ߫ ߓߊ߯ߙߊ߫ ߟߊ߫ ߢߐ߲߮ ߝߍ߬ ߞߊ߬ ߓߟߐߟߐ ߟߊߞߊ߬ߣߍ߲ ߕߏ߫?", u"ߖߊ߬ߡߊ ߣߌ߫ ߓߍ߯ ߛߊ߬ߥߏ ߘߐ߫."]
| mpl-2.0 |
MrNuggles/HeyBoet-Telegram-Bot | temboo/Library/Salesforce/Passwords/ResetPassword.py | 5 | 5475 | # -*- coding: utf-8 -*-
###############################################################################
#
# ResetPassword
# Resets a user's password to new randomized password.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ResetPassword(Choreography):
    # Choreo wrapper: resets a Salesforce user's password to a new
    # randomized password via the Temboo platform.

    def __init__(self, temboo_session):
        """
        Create a new instance of the ResetPassword Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(ResetPassword, self).__init__(temboo_session, '/Library/Salesforce/Passwords/ResetPassword')

    def new_input_set(self):
        # Factory for the input container consumed by execute().
        return ResetPasswordInputSet()

    def _make_result_set(self, result, path):
        # Wrap the raw execution result in a typed result set.
        return ResetPasswordResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Handle for polling an asynchronous execution of this Choreo.
        return ResetPasswordChoreographyExecution(session, exec_id, path)
class ResetPasswordInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the ResetPassword
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Each setter forwards to the base class with the Temboo input name; the
    # string keys are part of the wire protocol and must not change.

    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((optional, string) A valid access token retrieved during the OAuth process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new access token.)
        """
        super(ResetPasswordInputSet, self)._set_input('AccessToken', value)

    def set_ClientID(self, value):
        """
        Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Salesforce. Required unless providing a valid AccessToken.)
        """
        super(ResetPasswordInputSet, self)._set_input('ClientID', value)

    def set_ClientSecret(self, value):
        """
        Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Salesforce. Required unless providing a valid AccessToken.)
        """
        super(ResetPasswordInputSet, self)._set_input('ClientSecret', value)

    def set_ID(self, value):
        """
        Set the value of the ID input for this Choreo. ((required, string) The ID of the user whos password you are resetting.)
        """
        super(ResetPasswordInputSet, self)._set_input('ID', value)

    def set_InstanceName(self, value):
        """
        Set the value of the InstanceName input for this Choreo. ((required, string) The server url prefix that indicates which instance your Salesforce account is on (e.g. na1, na2, na3, etc).)
        """
        super(ResetPasswordInputSet, self)._set_input('InstanceName', value)

    def set_RefreshToken(self, value):
        """
        Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth Refresh Token used to generate a new access token when the original token is expired. Required unless providing a valid AccessToken.)
        """
        super(ResetPasswordInputSet, self)._set_input('RefreshToken', value)

    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: json (the default) and xml.)
        """
        super(ResetPasswordInputSet, self)._set_input('ResponseFormat', value)
class ResetPasswordResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the ResetPassword Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # NOTE: the parameter name shadows the builtin `str`; kept for
        # backward compatibility with generated callers.
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from Salesforce.)
        """
        return self._output.get('Response', None)

    def get_NewAccessToken(self):
        """
        Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
        """
        return self._output.get('NewAccessToken', None)

    def get_NewPassword(self):
        """
        Retrieve the value for the "NewPassword" output from this Choreo execution. ((string) New password returned from Salesforce.)
        """
        return self._output.get('NewPassword', None)
class ResetPasswordChoreographyExecution(ChoreographyExecution):
    # Async execution handle; builds the typed result set on completion.
    def _make_result_set(self, response, path):
        return ResetPasswordResultSet(response, path)
| gpl-3.0 |
bmaluenda/switch | examples/3zone_toy_stochastic_PySP/ReferenceModel.py | 1 | 3880 | # Copyright 2016 The Switch Authors. All rights reserved.
# Licensed under the Apache License, Version 2, which is in the LICENSE file.
"""
Generate a model for use with the PySP pyomo module, either for use with the
runef or runph commands. Both these scripts require a single .py file that
creates a pyomo model object named "model".
The inputs_dir parameter must match the inputs directory being used for the
current simulation.
The example in which this model generator is framed doesn't use the
fuel_markets module. This script is tailored to treat all the Switch model's
annual costs as resulting from the first stage decisions and the timepoint
costs as product of the second stage decision variables. This ReferenceModel
will produce incorrect results if the fuel_markets module is included in the
simulation, which includes fuel costs in the annual components of the
objective function. That would include second stage decisions in the
calculation of the first stage costs, resulting in different RootNode costs
per scenario, which is incongruent.
"""
# Must match the inputs directory used by the current simulation.
inputs_dir = "inputs"

###########################################################

import switch_mod.utilities as utilities
import switch_mod.financials as financials
import sys, os
from pyomo.environ import *

print "loading model..."

# Build the module list from inputs/modules.txt; the core 'switch_mod'
# package must always be loaded first.
try:
    module_fh = open(os.path.join(inputs_dir, 'modules.txt'), 'r')
except IOError, exc:
    sys.exit('Failed to open input file: {}'.format(exc))
module_list = [line.rstrip('\n') for line in module_fh]
module_list.insert(0,'switch_mod')
model = utilities.create_model(module_list, args=[])
# The following code augments the model object with Expressions for the
# Stage costs, which both runef and runph scripts need in order to build
# the stochastic objective function. In this particular example, only
# two stages are considered: Investment and Operation. These Expression
# names must match exactly the StageCostVariable parameter defined for
# each Stage in the ScenarioStructure.dat file.
# The following two functions are defined explicitely, because since they
# are nested inside another function in the financials module, they can't
# be called from this script.
def calc_tp_costs_in_period(m, t):
    """Total of all timepoint cost components of model m at timepoint t,
    each weighted by the timepoint's weight within its year."""
    total = 0
    for component_name in m.cost_components_tp:
        total += getattr(m, component_name)[t] * m.tp_weight_in_year[t]
    return total
def calc_annual_costs_in_period(m, p):
    """Total of all annual cost components of model m for period p."""
    per_component = [getattr(m, component_name)[p]
                     for component_name in m.cost_components_annual]
    return sum(per_component)
# In the current version of Switch-Pyomo, all annual costs are defined
# by First Stage decision variables, such as fixed O&M and capital
# costs, which are caused by the BuildProj, BuildTrans and BuildLocalTD
# variables, all of which are considered as first stage decisions in this
# two-stage example.
# Likewise, all timepoint defined costs are caused by Second Stage decision
# variables, such as variable O&M and fuel use costs, which are caused by
# the DispatchProj variable. These will be considered as second stage
# decisions in this example.
# Further comments on this are written in the Readme file.
# First-stage cost: the sum over all periods of the annual (investment-driven)
# cost components, converted to a present value at the base financial year.
model.InvestmentCost = Expression(rule=lambda m: sum(
    calc_annual_costs_in_period(m, p) * financials.uniform_series_to_present_value(
        m.discount_rate, m.period_length_years[p]) * financials.future_to_present_value(
        m.discount_rate, (m.period_start[p] - m.base_financial_year)) for p in m.PERIODS))
# Second-stage cost: the sum over all periods of the timepoint-level cost
# components (summed across each period's timepoints), discounted the same way.
model.OperationCost = Expression(rule=lambda m: sum(
    sum(calc_tp_costs_in_period(m, t) for t in m.PERIOD_TPS[p]) * financials.uniform_series_to_present_value(
        m.discount_rate, m.period_length_years[p]) * financials.future_to_present_value(
        m.discount_rate, (m.period_start[p] - m.base_financial_year)) for p in m.PERIODS))
print "model successfully loaded..." | apache-2.0 |
tgoodlet/pytest | doc/en/example/multipython.py | 8 | 1748 | """
module containing a parametrized tests testing cross-python
serialization via the pickle module.
"""
import py
import pytest
import _pytest._code
pythonlist = ['python2.6', 'python2.7', 'python3.4', 'python3.5']
@pytest.fixture(params=pythonlist)
def python1(request, tmpdir):
    """One Python helper per interpreter in pythonlist; its pickle data
    lives in a fresh tmpdir."""
    return Python(request.param, tmpdir.join("data.pickle"))
@pytest.fixture(params=pythonlist)
def python2(request, python1):
    """Second interpreter under test; reads the pickle file that python1
    wrote, so every interpreter pair is exercised."""
    partner = Python(request.param, python1.picklefile)
    return partner
class Python:
    """Wrapper around one external python interpreter.

    dumps() pickles an object using that interpreter; load_and_is_true()
    unpickles with (possibly a different) interpreter and evaluates an
    expression against the loaded object.
    """
    def __init__(self, version, picklefile):
        # Locate the requested interpreter on PATH; skip the test when
        # that version is not installed on this machine.
        self.pythonpath = py.path.local.sysfind(version)
        if not self.pythonpath:
            pytest.skip("%r not found" %(version,))
        self.picklefile = picklefile

    def dumps(self, obj):
        # Generate a small dump script next to the pickle file and run it
        # with the target interpreter. protocol=2 is readable by both
        # Python 2 and Python 3.
        dumpfile = self.picklefile.dirpath("dump.py")
        dumpfile.write(_pytest._code.Source("""
            import pickle
            f = open(%r, 'wb')
            s = pickle.dump(%r, f, protocol=2)
            f.close()
        """ % (str(self.picklefile), obj)))
        py.process.cmdexec("%s %s" %(self.pythonpath, dumpfile))

    def load_and_is_true(self, expression):
        # Generate a load script that unpickles the object as 'obj' and
        # exits non-zero (cmdexec raises) when the expression is falsy.
        loadfile = self.picklefile.dirpath("load.py")
        loadfile.write(_pytest._code.Source("""
            import pickle
            f = open(%r, 'rb')
            obj = pickle.load(f)
            f.close()
            res = eval(%r)
            if not res:
                raise SystemExit(1)
        """ % (str(self.picklefile), expression)))
        print (loadfile)
        py.process.cmdexec("%s %s" %(self.pythonpath, loadfile))
@pytest.mark.parametrize("obj", [42, {}, {1:3},])
def test_basic_objects(python1, python2, obj):
    # Round-trip: pickle with one interpreter, unpickle and compare with
    # another; the fixtures enumerate all interpreter pairs.
    python1.dumps(obj)
    python2.load_and_is_true("obj == %s" % obj)
| mit |
bslatkin/8-bits | appengine-mapreduce/python/src/mapreduce/file_formats.py | 3 | 13599 | #!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define file formats."""
# pylint: disable=g-bad-name
__all__ = ['FileFormat',
'FORMATS']
import StringIO
import zipfile
class FileFormat(object):
    """FileFormat can operate/iterate on files of a specific format.

    Life cycle of FileFormat:
      1. Two ways that FileFormat is created: file_format_root.split creates
         FileFormat from scratch. FileFormatRoot.from_json creates FileFormat
         from serialized json str. Either way, it is associated with a
         FileFormatRoot. It should never be instantiated directly.
      2. Root acts as a coordinator among FileFormats. Root initializes
         its many fields so that FileFormat knows how to iterate over its
         inputs.
      3. Its next() method is used to iterate.
      4. It keeps iterating until either root calls its to_json() or root
         sends it a StopIteration.

    How to define a new format:
      1. Subclass this.
      2. Override NAME and ARGUMENTS. file_format_parser._FileFormatParser
         uses them to validate a format string contains only legal
         names and arguments.
      3. Optionally override preprocess(). See method doc.
      4. Override get_next(). Used by next() to fetch the next content to
         return. See method.
      5. Optionally override split() if this format supports it. See method.
      6. Write unit tests. Tricky logics (to/from_json, advance
         current input file) are shared. Thus as long as you respected
         get_next()'s pre/post conditions, tests are very simple.
      7. Register your format at FORMATS.

    Attributes:
      ARGUMENTS: a set of acceptable arguments to this format. Used for
        parsing this format.
      NAME: the name of this format. Used for parsing this format.
    """

    ARGUMENTS = set()
    NAME = '_file'

    # Json Properties (keys used by to_json()/from_json()).
    _KWARGS = 'kwargs'
    _RANGE = 'index_range'
    _FORMAT = 'name'
    _PREVIOUS_INDEX = 'previous_index'

    def __init__(self,
                 index,
                 index_range=None,
                 **kwargs):
        # pylint: disable=g-doc-args
        """Initialize.

        Args:
          index: the index of the subfile to read from the current file.
          index_range: a tuple [start_index, end_index) that if defined,
            should bound index. When index is end_index, current file is
            consumed.
          kwargs: kwargs for a specific FileFormat. What arguments are
            accepted and their semantics depend on each subclass's
            interpretation.

        Raises:
          ValueError: if some argument is not expected by the format.
        """
        for k in kwargs:
            if k not in self.ARGUMENTS:
                raise ValueError('Illegal argument %s' % k)
        self._kwargs = kwargs
        self._index = index
        self._previous_index = index
        # Set by file_format_root._RootFileStream
        self._range = index_range
        self._input_files_stream = None
        self._cache = {}

    def get_current_file(self):
        """Get the current file to iterate upon.

        Returns:
          A Python file object. This file is already seeked to the position
          from last iteration. If read raises EOF, that means the file is
          exhausted.
        """
        return self._input_files_stream.current

    def get_index(self):
        """Get index.

        If the format is an archive format, get_index() tells the format
        which subfile from current file should it process. This value is
        maintained across pickles and resets to 0 when a new file starts.

        Returns:
          index of the subfile to process from current file.
        """
        return self._index

    def increment_index(self):
        """Increment index.

        Increment index value after finished processing the current subfile
        from current file.
        """
        self._index += 1

    def get_cache(self):
        """Get cache to store expensive objects.

        Some formats need expensive initialization to even start iteration.
        They can store the initialized objects into the cache and try to
        retrieve the objects from the cache at later iterations.

        For example, a zip format needs to create a ZipFile object to
        iterate over the zipfile. It can avoid doing so on every "next"
        call by storing the ZipFile into cache.

        Cache does not guarantee persistence. It is cleared at pickles.
        It is also intentionally cleared after the currently iterated file
        is entirely consumed.

        Returns:
          A dict to store temporary objects.
        """
        return self._cache

    @classmethod
    def default_instance(cls, **kwargs):
        # pylint: disable=g-doc-args
        """Create an default instance of FileFormat.

        Used by parser to create default instances.

        Args:
          kwargs: kwargs parser parsed from user input.

        Returns:
          A default instance of FileFormat.
        """
        return cls(0, **kwargs)

    def __repr__(self):
        return str(self.to_json())

    def __str__(self):
        # Renders e.g. "csv(delimiter=,,encoding=utf-8)".
        # NOTE: dict.iteritems is Python 2 only.
        result = self.NAME
        if self._kwargs:
            result += (
                '(' +
                ','.join(k + '=' + v for k, v in sorted(self._kwargs.iteritems())) +
                ')')
        return result

    def checkpoint(self):
        """Save _index before updating it to support potential rollback."""
        self._previous_index = self._index

    def to_json(self):
        """Serialize states to a json compatible structure."""
        # _previous_index (not _index) is saved so that a resumed iteration
        # replays the last checkpointed subfile rather than skipping it.
        return {self._KWARGS: self._kwargs,
                self._RANGE: self._range,
                self._FORMAT: self.NAME,
                self._PREVIOUS_INDEX: self._previous_index}

    @classmethod
    def from_json(cls, json):
        """Deserialize from json compatible structure."""
        return cls(json[cls._PREVIOUS_INDEX], json[cls._RANGE], **json[cls._KWARGS])

    @classmethod
    def can_split(cls):
        """Indicates whether this format support splitting within a file boundary.

        Returns:
          True if a FileFormat allows its inputs to be splitted into
          different shards.
        """
        # Probes the (possibly overridden) split() implementation: the base
        # implementation raises NotImplementedError, meaning "no split".
        try:
            cls.split(0, 0, None, {})
        except NotImplementedError:
            return False
        return True

    @classmethod
    # pylint: disable-msg=W0613
    def split(cls, desired_size, start_index, input_file, cache):
        """Splits a single chunk of desired_size from file.

        FileFormatRoot uses this method to ask FileFormat how to split
        one file of this format.

        This method takes an opened file and a start_index. If file
        size is bigger than desired_size, the method determines a chunk of
        the file whose size is close to desired_size. The chuck is indicated
        by [start_index, end_index). If the file is smaller than
        desired_size, the chunk will include the rest of the input_file.

        This method also indicates how many bytes are consumed by this chunk
        by returning size_left to the caller.

        Args:
          desired_size: desired number of bytes for this split. Positive int.
          start_index: the index to start this split. The index is not
            necessarily an offset. In zipfile, for example, it's the index
            of the member file in the archive. Non negative int.
          input_file: opened Files API file to split. Do not close this file.
          cache: a dict to cache any object over multiple calls if needed.

        Returns:
          Returns a tuple of (size_left, end_index). If end_index equals
          start_index, the file is fully split.
        """
        raise NotImplementedError('split is not implemented for %s.' %
                                  cls.__name__)

    def __iter__(self):
        return self

    def preprocess(self, file_object):
        """Does preprocessing on the file-like object and returns another one.

        Normally a FileFormat directly reads from the file returned by
        get_current_file(). But some formats need to preprocess that file
        entirely before iteration can starts (e.g. text formats need to
        decode first).

        Args:
          file_object: read from this object and process its content.

        Returns:
          a file-like object containing processed contents. This file object
          will be returned by get_current_file() instead. If the returned
          object is newly created, close the old one.
        """
        return file_object

    def next(self):
        """Returns a file-like object containing next content.

        Returns:
          A file-like object containing next content.

        Raises:
          ValueError: if content is of none str type.
        """
        result = None
        try:
            # Limit _index by _range.
            if self._range is not None:
                if self._index < self._range[0]:
                    self._index = self._range[0]
                elif self._index >= self._range[1]:
                    raise EOFError()

            self._input_files_stream.checkpoint()
            self.checkpoint()
            result = self.get_next()
        except EOFError:
            # Current file exhausted: advance to the next input file, reset
            # per-file state, and retry (recursively) on the new file.
            self._input_files_stream.advance()
            self._index = 0
            self._cache = {}
            return self.next()
        if isinstance(result, str):
            result = StringIO.StringIO(result)
        elif isinstance(result, unicode):
            # Python 2 code: get_next() must return byte str, never unicode.
            raise ValueError('%s can not return unicode object.' %
                             self.__class__.__name__)
        return result

    def get_next(self):
        """Finds the next content to return.

        Expected steps of any implementation:
          1. Call get_current_file() to get the file to iterate on.
          2. If nothing is read, raise EOFError. Otherwise, process the
             contents read in anyway. _kwargs is guaranteed to be a dict
             containing all arguments and values specified by user.
          3. If the format is an archive format, use get_index() to
             see which subfile to read. Call increment_index() if
             finished current subfile. These two methods will make sure
             the index is maintained during (de)serialization.
          4. Return the processed contents either as a file-like object or
             Python str. NO UNICODE.

        Returns:
          The str or file like object if got anything to return.

        Raises:
          EOFError if no content is found to return.
        """
        raise NotImplementedError('%s not implemented.' % self.__class__.__name__)
raise NotImplementedError('%s not implemented.' % self.__class__.__name__)
# Binary formats.
class _BinaryFormat(FileFormat):
    """Base class for binary formats.

    Reads the whole file as one raw str. Subclasses only override NAME,
    which doubles as the codec used to decode the raw bytes, so it must
    be a valid Python encoding name.
    """

    NAME = 'bytes'

    def get_next(self):
        """Inherited."""
        data = self.get_current_file().read()
        if not data:
            raise EOFError()
        if self.NAME == _BinaryFormat.NAME:
            # Plain 'bytes' format: return the raw content untouched.
            return data
        return data.decode(self.NAME)
class _Base64Format(_BinaryFormat):
    """Read entire file as base64 str."""

    # NAME doubles as the codec passed to str.decode() by
    # _BinaryFormat.get_next() ('base64' is a valid codec on Python 2).
    NAME = 'base64'
# Archive formats.
class _ZipFormat(FileFormat):
    """Iterate over the member files of a zip archive."""

    NAME = 'zip'
    # _index specifies the next member file to read.
    DEFAULT_INDEX_VALUE = 0

    def get_next(self):
        """Inherited."""
        cache = self.get_cache()
        if 'zip_file' not in cache:
            # First call for this archive: open it once and memoize both
            # the ZipFile and its member list for later calls.
            archive = zipfile.ZipFile(self._input_files_stream.current)
            cache['zip_file'] = archive
            cache['infolist'] = archive.infolist()
        archive = cache['zip_file']
        members = cache['infolist']

        member_index = self.get_index()
        if member_index == len(members):
            raise EOFError()

        content = archive.read(members[member_index])
        self.increment_index()
        return content

    @classmethod
    def can_split(cls):
        """Inherited."""
        return True

    @classmethod
    def split(cls, desired_size, start_index, opened_file, cache):
        """Inherited."""
        if 'infolist' not in cache:
            cache['infolist'] = zipfile.ZipFile(opened_file).infolist()
        members = cache['infolist']

        # Accumulate member files until the chunk reaches desired_size or
        # the archive runs out of members.
        end_index = start_index
        remaining = desired_size
        while remaining > 0 and end_index < len(members):
            remaining -= members[end_index].file_size
            end_index += 1
        return remaining, end_index
# Text formats.
class _TextFormat(FileFormat):
    """Base class for text formats.

    A text format requires its input to be decoded before iteration; this
    class implements that decoding step in preprocess().
    """

    ARGUMENTS = set(['encoding'])
    NAME = '_text'

    def preprocess(self, file_object):
        """Decodes the entire file to read text."""
        if 'encoding' not in self._kwargs:
            # No encoding requested: iterate the raw file as-is.
            return file_object
        decoded = file_object.read().decode(self._kwargs['encoding'])
        file_object.close()
        return StringIO.StringIO(decoded)
class _LinesFormat(_TextFormat):
    """Yield the current file one line at a time."""

    NAME = 'lines'

    def get_next(self):
        """Inherited."""
        line = self.get_current_file().readline()
        if not line:
            raise EOFError()
        if 'encoding' in self._kwargs:
            # preprocess() decoded the file to unicode; re-encode so that
            # next() receives a byte str (it rejects unicode).
            line = line.encode(self._kwargs['encoding'])
        return line
class _CSVFormat(_TextFormat):
    # Accepts a 'delimiter' argument in addition to _TextFormat's 'encoding'.
    ARGUMENTS = _TextFormat.ARGUMENTS.union(['delimiter'])
    NAME = 'csv'

    # TODO(user) implement this. csv exists now only to test parser.
# Registry mapping format name (as written in user format strings) to its
# processor class. New formats must be added here (step 7 of the recipe in
# FileFormat's class docstring).
FORMATS = {
    # Binary formats.
    'base64': _Base64Format,
    'bytes': _BinaryFormat,
    # Text format.
    'csv': _CSVFormat,
    'lines': _LinesFormat,
    # Archive formats.
    'zip': _ZipFormat}
| apache-2.0 |
jbuchbinder/youtube-dl | youtube_dl/extractor/cnbc.py | 69 | 1135 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import smuggle_url
# Extractor for video.cnbc.com gallery pages. It does not download media
# itself: it emits a 'url_transparent' result that hands the ThePlatform
# link to the ThePlatform extractor.
class CNBCIE(InfoExtractor):
    _VALID_URL = r'https?://video\.cnbc\.com/gallery/\?video=(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://video.cnbc.com/gallery/?video=3000503714',
        'info_dict': {
            'id': '3000503714',
            'ext': 'mp4',
            'title': 'Fighting zombies is big business',
            'description': 'md5:0c100d8e1a7947bd2feec9a5550e519e',
            'timestamp': 1459332000,
            'upload_date': '20160330',
            'uploader': 'NBCU-CNBC',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        return {
            '_type': 'url_transparent',
            'ie_key': 'ThePlatform',
            # smuggle_url passes force_smil_url along to the ThePlatform
            # extractor together with the media URL.
            'url': smuggle_url(
                'http://link.theplatform.com/s/gZWlPC/media/guid/2408950221/%s?mbr=true&manifest=m3u' % video_id,
                {'force_smil_url': True}),
            'id': video_id,
        }
| unlicense |
ii0/bits | python/testefi.py | 2 | 2376 | # Copyright (c) 2015, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for EFI"""
import ctypes
import efi
import testsuite
def register_tests():
    """Register all EFI table CRC tests under the "EFI Tests" submenu."""
    efi_tests = (
        ("EFI System Table CRC", test_system_services_crc),
        ("EFI Runtime Services Table CRC", test_runtime_services_crc),
        ("EFI Boot Services Table CRC", test_boot_services_crc),
    )
    for name, func in efi_tests:
        testsuite.add_test(name, func, submenu="EFI Tests")
def test_system_services_crc():
    """Verify the CRC32 embedded in the EFI System Table."""
    crc_ok = efi.table_crc(efi.system_table)
    testsuite.test("EFI System Table CRC32 is valid", crc_ok)
def test_runtime_services_crc():
    """Verify the CRC32 embedded in the EFI Runtime Services Table."""
    crc_ok = efi.table_crc(efi.system_table.RuntimeServices.contents)
    testsuite.test("EFI Runtime Services Table CRC32 is valid", crc_ok)
def test_boot_services_crc():
    """Verify the CRC32 embedded in the EFI Boot Services Table."""
    crc_ok = efi.table_crc(efi.system_table.BootServices.contents)
    testsuite.test("EFI Boot Services Table CRC32 is valid", crc_ok)
| bsd-3-clause |
ryfeus/lambda-packs | Spacy/source2.7/numpy/ma/tests/test_regression.py | 8 | 2437 | from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.testing import (
assert_, assert_array_equal, assert_allclose, run_module_suite,
suppress_warnings
)
class TestRegression(object):
    """Regression tests for numpy.ma, each pinned to a ticket/issue.

    Each body reproduces the originally reported failure as closely as
    possible, so the exact construction should not be "cleaned up".
    Tests without asserts pass simply by not raising.
    """

    def test_masked_array_create(self):
        # Ticket #17
        x = np.ma.masked_array([0, 1, 2, 3, 0, 4, 5, 6],
                               mask=[0, 0, 0, 1, 1, 1, 0, 0])
        assert_array_equal(np.ma.nonzero(x), [[1, 2, 6, 7]])

    def test_masked_array(self):
        # Ticket #61
        np.ma.array(1, mask=[1])

    def test_mem_masked_where(self):
        # Ticket #62
        from numpy.ma import masked_where, MaskType
        a = np.zeros((1, 1))
        b = np.zeros(a.shape, MaskType)
        c = masked_where(b, a)
        a-c  # result intentionally discarded; must not crash

    def test_masked_array_multiply(self):
        # Ticket #254
        a = np.ma.zeros((4, 1))
        a[2, 0] = np.ma.masked
        b = np.zeros((4, 2))
        a*b
        b*a

    def test_masked_array_repeat(self):
        # Ticket #271
        np.ma.array([1], mask=False).repeat(10)

    def test_masked_array_repr_unicode(self):
        # Ticket #1256
        repr(np.ma.array(u"Unicode"))

    def test_atleast_2d(self):
        # Ticket #1559
        a = np.ma.masked_array([0.0, 1.2, 3.5], mask=[False, True, False])
        b = np.atleast_2d(a)
        assert_(a.mask.ndim == 1)
        assert_(b.mask.ndim == 2)

    def test_set_fill_value_unicode_py3(self):
        # Ticket #2733
        a = np.ma.masked_array(['a', 'b', 'c'], mask=[1, 0, 0])
        a.fill_value = 'X'
        assert_(a.fill_value == 'X')

    def test_var_sets_maskedarray_scalar(self):
        # Issue gh-2757
        a = np.ma.array(np.arange(5), mask=True)
        mout = np.ma.array(-1, dtype=float)
        a.var(out=mout)
        assert_(mout._data == 0)

    def test_ddof_corrcoef(self):
        # See gh-3336
        x = np.ma.masked_equal([1, 2, 3, 4, 5], 4)
        y = np.array([2, 2.5, 3.1, 3, 5])
        # this test can be removed after deprecation.
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, "bias and ddof have no effect")
            r0 = np.ma.corrcoef(x, y, ddof=0)
            r1 = np.ma.corrcoef(x, y, ddof=1)
            # ddof should not have an effect (it gets cancelled out)
            assert_allclose(r0.data, r1.data)
# Allow running this test module directly (pre-pytest numpy test runner).
if __name__ == "__main__":
    run_module_suite()
| mit |
kawasaki2013/python-for-android-x86 | python-build/python-libs/gdata/build/lib/gdata/tlslite/integration/XMLRPCTransport.py | 271 | 5812 | """TLS Lite + xmlrpclib."""
import xmlrpclib
import httplib
from gdata.tlslite.integration.HTTPTLSConnection import HTTPTLSConnection
from gdata.tlslite.integration.ClientHelper import ClientHelper
class XMLRPCTransport(xmlrpclib.Transport, ClientHelper):
    """Handles an HTTPS transaction to an XML-RPC server."""

    def __init__(self,
                 username=None, password=None, sharedKey=None,
                 certChain=None, privateKey=None,
                 cryptoID=None, protocol=None,
                 x509Fingerprint=None,
                 x509TrustList=None, x509CommonName=None,
                 settings=None):
        """Create a new XMLRPCTransport.

        An instance of this class can be passed to L{xmlrpclib.ServerProxy}
        to use TLS with XML-RPC calls::

            from tlslite.api import XMLRPCTransport
            from xmlrpclib import ServerProxy

            transport = XMLRPCTransport(user="alice", password="abra123")
            server = ServerProxy("https://localhost", transport)

        For client authentication, use one of these argument
        combinations:
         - username, password (SRP)
         - username, sharedKey (shared-key)
         - certChain, privateKey (certificate)

        For server authentication, you can either rely on the
        implicit mutual authentication performed by SRP or
        shared-keys, or you can do certificate-based server
        authentication with one of these argument combinations:
         - cryptoID[, protocol] (requires cryptoIDlib)
         - x509Fingerprint
         - x509TrustList[, x509CommonName] (requires cryptlib_py)

        Certificate-based server authentication is compatible with
        SRP or certificate-based client authentication. It is
        not compatible with shared-keys.

        The constructor does not perform the TLS handshake itself, but
        simply stores these arguments for later. The handshake is
        performed only when this class needs to connect with the
        server. Thus you should be prepared to handle TLS-specific
        exceptions when calling methods of L{xmlrpclib.ServerProxy}. See
        the client handshake functions in
        L{tlslite.TLSConnection.TLSConnection} for details on which
        exceptions might be raised.

        @type username: str
        @param username: SRP or shared-key username. Requires the
        'password' or 'sharedKey' argument.

        @type password: str
        @param password: SRP password for mutual authentication.
        Requires the 'username' argument.

        @type sharedKey: str
        @param sharedKey: Shared key for mutual authentication.
        Requires the 'username' argument.

        @type certChain: L{tlslite.X509CertChain.X509CertChain} or
        L{cryptoIDlib.CertChain.CertChain}
        @param certChain: Certificate chain for client authentication.
        Requires the 'privateKey' argument. Excludes the SRP or
        shared-key related arguments.

        @type privateKey: L{tlslite.utils.RSAKey.RSAKey}
        @param privateKey: Private key for client authentication.
        Requires the 'certChain' argument. Excludes the SRP or
        shared-key related arguments.

        @type cryptoID: str
        @param cryptoID: cryptoID for server authentication. Mutually
        exclusive with the 'x509...' arguments.

        @type protocol: str
        @param protocol: cryptoID protocol URI for server
        authentication. Requires the 'cryptoID' argument.

        @type x509Fingerprint: str
        @param x509Fingerprint: Hex-encoded X.509 fingerprint for
        server authentication. Mutually exclusive with the 'cryptoID'
        and 'x509TrustList' arguments.

        @type x509TrustList: list of L{tlslite.X509.X509}
        @param x509TrustList: A list of trusted root certificates. The
        other party must present a certificate chain which extends to
        one of these root certificates. The cryptlib_py module must be
        installed to use this parameter. Mutually exclusive with the
        'cryptoID' and 'x509Fingerprint' arguments.

        @type x509CommonName: str
        @param x509CommonName: The end-entity certificate's 'CN' field
        must match this value. For a web server, this is typically a
        server name such as 'www.amazon.com'. Mutually exclusive with
        the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
        'x509TrustList' argument.

        @type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
        @param settings: Various settings which can be used to control
        the ciphersuites, certificate types, and SSL/TLS versions
        offered by the client.
        """
        # All validation and storage of these arguments is delegated to
        # ClientHelper; this class only adds the xmlrpclib glue.
        ClientHelper.__init__(self,
                              username, password, sharedKey,
                              certChain, privateKey,
                              cryptoID, protocol,
                              x509Fingerprint,
                              x509TrustList, x509CommonName,
                              settings)

    def make_connection(self, host):
        # create a HTTPS connection object from a host descriptor
        host, extra_headers, x509 = self.get_host_info(host)
        http = HTTPTLSConnection(host, None,
                                 self.username, self.password,
                                 self.sharedKey,
                                 self.certChain, self.privateKey,
                                 self.checker.cryptoID,
                                 self.checker.protocol,
                                 self.checker.x509Fingerprint,
                                 self.checker.x509TrustList,
                                 self.checker.x509CommonName,
                                 self.settings)
        # Wrap the TLS connection in the legacy httplib.HTTP interface that
        # Python 2's xmlrpclib expects. NOTE(review): _setup is a private
        # httplib hook -- this only works on Python 2's httplib.
        http2 = httplib.HTTP()
        http2._setup(http)
        return http2
| apache-2.0 |
forfuns/shadowsocks | shadowsocks/lru_cache.py | 983 | 4290 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import collections
import logging
import time

try:
    # Python 3.3 moved the container ABCs to collections.abc; the aliases
    # at the collections top level were removed in Python 3.10.
    from collections.abc import MutableMapping
except ImportError:  # Python 2
    from collections import MutableMapping
# this LRUCache is optimized for concurrency, not QPS
# n: concurrency, keys stored in the cache
# m: visits not timed out, proportional to QPS * timeout
# get & set is O(1), not O(n). thus we can support very large n
# TODO: if timeout or QPS is too large, then this cache is not very efficient,
# as sweep() causes long pause
class LRUCache(MutableMapping):
    """A dict-like cache whose entries expire ``timeout`` seconds after
    their last visit.

    This class is not thread safe.

    Optimized for concurrency (many live keys), not QPS: get and set are
    O(1), while sweep() is O(m) in the number of recorded visits, so a
    very large timeout * QPS makes sweep() pause for long.

    Note: inherits from collections.abc.MutableMapping (via a compatibility
    import) because the old collections.MutableMapping alias was removed in
    Python 3.10.
    """

    def __init__(self, timeout=60, close_callback=None, *args, **kwargs):
        # Seconds a key stays alive after its most recent visit.
        self.timeout = timeout
        # Optional callable invoked once per evicted value during sweep().
        self.close_callback = close_callback
        # key -> value
        self._store = {}
        # visit time -> list of keys visited at that time
        self._time_to_keys = collections.defaultdict(list)
        # key -> time of its most recent visit
        self._keys_to_last_time = {}
        # FIFO of visit times (oldest first); drives sweep()
        self._last_visits = collections.deque()
        # values already passed to close_callback during the current sweep
        self._closed_values = set()
        self.update(dict(*args, **kwargs))  # use the free update to set keys

    def __getitem__(self, key):
        # O(1); records the visit so the key's lifetime is extended.
        t = time.time()
        self._keys_to_last_time[key] = t
        self._time_to_keys[t].append(key)
        self._last_visits.append(t)
        return self._store[key]

    def __setitem__(self, key, value):
        # O(1); setting also counts as a visit.
        t = time.time()
        self._keys_to_last_time[key] = t
        self._store[key] = value
        self._time_to_keys[t].append(key)
        self._last_visits.append(t)

    def __delitem__(self, key):
        # O(1); stale visit records are cleaned up lazily by sweep().
        del self._store[key]
        del self._keys_to_last_time[key]

    def __iter__(self):
        return iter(self._store)

    def __len__(self):
        return len(self._store)

    def sweep(self):
        """Evict every entry whose last visit is older than self.timeout.

        O(m) in the number of recorded visits. When close_callback is set
        it is called exactly once per evicted value (deduplicated via
        _closed_values, since one value may be reachable via several
        visit records).
        """
        now = time.time()
        c = 0
        while len(self._last_visits) > 0:
            least = self._last_visits[0]
            if now - least <= self.timeout:
                # Everything further in the deque is newer; stop early.
                break
            if self.close_callback is not None:
                for key in self._time_to_keys[least]:
                    if key in self._store:
                        if now - self._keys_to_last_time[key] > self.timeout:
                            value = self._store[key]
                            if value not in self._closed_values:
                                self.close_callback(value)
                                self._closed_values.add(value)
            for key in self._time_to_keys[least]:
                self._last_visits.popleft()
                if key in self._store:
                    # Only evict if the key was not visited again later.
                    if now - self._keys_to_last_time[key] > self.timeout:
                        del self._store[key]
                        del self._keys_to_last_time[key]
                        c += 1
            del self._time_to_keys[least]
        if c:
            self._closed_values.clear()
            logging.debug('%d keys swept' % c)
def test():
    # Manual smoke test for LRUCache; relies on real wall-clock sleeps.
    # Entries expire 'timeout' seconds after their last visit.
    c = LRUCache(timeout=0.3)
    c['a'] = 1
    assert c['a'] == 1
    time.sleep(0.5)
    c.sweep()
    assert 'a' not in c
    c['a'] = 2
    c['b'] = 3
    time.sleep(0.2)
    c.sweep()
    assert c['a'] == 2
    assert c['b'] == 3
    time.sleep(0.2)
    c.sweep()
    c['b']  # visit 'b' so that only 'a' times out below
    time.sleep(0.2)
    c.sweep()
    assert 'a' not in c
    assert c['b'] == 3
    time.sleep(0.5)
    c.sweep()
    assert 'a' not in c
    assert 'b' not in c

    global close_cb_called
    close_cb_called = False

    def close_cb(t):
        global close_cb_called
        # The callback must fire at most once per evicted value.
        assert not close_cb_called
        close_cb_called = True

    c = LRUCache(timeout=0.1, close_callback=close_cb)
    c['s'] = 1
    c['s']
    time.sleep(0.1)
    c['s']
    time.sleep(0.3)
    c.sweep()
# Run the sleep-based smoke test only when executed directly.
if __name__ == '__main__':
    test()
| apache-2.0 |
savanu/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/extensions.py | 489 | 31780 | # Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from mod_pywebsocket import common
from mod_pywebsocket import util
from mod_pywebsocket.http_header_util import quote_if_necessary
# The list of available server side extension processor classes.
# NOTE(review): appears to map extension name -> processor class, with
# registration code elsewhere in this module -- confirm before relying.
_available_processors = {}
# Names of the registered compression extensions.
_compression_extension_names = []
class ExtensionProcessorInterface(object):
    """Base class for per-extension negotiation/processing logic.

    Concrete extensions override name(), _get_extension_response_internal()
    and _setup_stream_options_internal(); the public wrappers here handle
    the shared active/inactive bookkeeping.
    """

    def __init__(self, request):
        self._logger = util.get_class_logger(self)

        self._request = request
        self._active = True

    def request(self):
        """Return the extension request this processor was created from."""
        return self._request

    def name(self):
        """Return the extension name (None on the abstract base)."""
        return None

    def check_consistency_with_other_processors(self, processors):
        """Hook for validating this processor against the others; no-op here."""
        pass

    def set_active(self, active):
        self._active = active

    def is_active(self):
        return self._active

    def _get_extension_response_internal(self):
        return None

    def get_extension_response(self):
        """Return the response to send back, deactivating on failure."""
        if self._active:
            response = self._get_extension_response_internal()
            if response is None:
                # Negotiation failed; mark the extension unusable.
                self._active = False
            return response
        self._logger.debug('Extension %s is deactivated', self.name())
        return None

    def _setup_stream_options_internal(self, stream_options):
        pass

    def setup_stream_options(self, stream_options):
        if not self._active:
            return
        self._setup_stream_options_internal(stream_options)
def _log_outgoing_compression_ratio(
logger, original_bytes, filtered_bytes, average_ratio):
# Print inf when ratio is not available.
ratio = float('inf')
if original_bytes != 0:
ratio = float(filtered_bytes) / original_bytes
logger.debug('Outgoing compression ratio: %f (average: %f)' %
(ratio, average_ratio))
def _log_incoming_compression_ratio(
logger, received_bytes, filtered_bytes, average_ratio):
# Print inf when ratio is not available.
ratio = float('inf')
if filtered_bytes != 0:
ratio = float(received_bytes) / filtered_bytes
logger.debug('Incoming compression ratio: %f (average: %f)' %
(ratio, average_ratio))
def _parse_window_bits(bits):
"""Return parsed integer value iff the given string conforms to the
grammar of the window bits extension parameters.
"""
if bits is None:
raise ValueError('Value is required')
# For non integer values such as "10.0", ValueError will be raised.
int_bits = int(bits)
# First condition is to drop leading zero case e.g. "08".
if bits != str(int_bits) or int_bits < 8 or int_bits > 15:
raise ValueError('Invalid value: %r' % bits)
return int_bits
class _AverageRatioCalculator(object):
"""Stores total bytes of original and result data, and calculates average
result / original ratio.
"""
def __init__(self):
self._total_original_bytes = 0
self._total_result_bytes = 0
def add_original_bytes(self, value):
self._total_original_bytes += value
def add_result_bytes(self, value):
self._total_result_bytes += value
def get_average_ratio(self):
if self._total_original_bytes != 0:
return (float(self._total_result_bytes) /
self._total_original_bytes)
else:
return float('inf')
class DeflateFrameExtensionProcessor(ExtensionProcessorInterface):
    """deflate-frame extension processor.

    Specification:
    http://tools.ietf.org/html/draft-tyoshino-hybi-websocket-perframe-deflate
    """

    _WINDOW_BITS_PARAM = 'max_window_bits'
    _NO_CONTEXT_TAKEOVER_PARAM = 'no_context_takeover'

    def __init__(self, request):
        ExtensionProcessorInterface.__init__(self, request)
        self._logger = util.get_class_logger(self)

        self._response_window_bits = None
        self._response_no_context_takeover = False
        self._bfinal = False

        # Calculates
        #     (Total outgoing bytes supplied to this filter) /
        #     (Total bytes sent to the network after applying this filter)
        self._outgoing_average_ratio_calculator = _AverageRatioCalculator()

        # Calculates
        #     (Total bytes received from the network) /
        #     (Total incoming bytes obtained after applying this filter)
        self._incoming_average_ratio_calculator = _AverageRatioCalculator()

    def name(self):
        return common.DEFLATE_FRAME_EXTENSION

    def _get_extension_response_internal(self):
        # Any unknown parameter will be just ignored.

        window_bits = None
        if self._request.has_parameter(self._WINDOW_BITS_PARAM):
            window_bits = self._request.get_parameter_value(
                self._WINDOW_BITS_PARAM)
            try:
                window_bits = _parse_window_bits(window_bits)
            except ValueError:
                # Fix: was "except ValueError, e" (Python-2-only syntax)
                # binding an exception instance that was never used.
                return None

        # no_context_takeover must be a valueless parameter.
        no_context_takeover = self._request.has_parameter(
            self._NO_CONTEXT_TAKEOVER_PARAM)
        if (no_context_takeover and
            self._request.get_parameter_value(
                self._NO_CONTEXT_TAKEOVER_PARAM) is not None):
            return None

        self._rfc1979_deflater = util._RFC1979Deflater(
            window_bits, no_context_takeover)

        self._rfc1979_inflater = util._RFC1979Inflater()

        self._compress_outgoing = True

        response = common.ExtensionParameter(self._request.name())

        if self._response_window_bits is not None:
            response.add_parameter(
                self._WINDOW_BITS_PARAM, str(self._response_window_bits))
        if self._response_no_context_takeover:
            response.add_parameter(
                self._NO_CONTEXT_TAKEOVER_PARAM, None)

        # Fix: the debug message previously said "window_wbits" for the
        # response value.
        self._logger.debug(
            'Enable %s extension ('
            'request: window_bits=%s; no_context_takeover=%r, '
            'response: window_bits=%s; no_context_takeover=%r)' %
            (self._request.name(),
             window_bits,
             no_context_takeover,
             self._response_window_bits,
             self._response_no_context_takeover))

        return response

    def _setup_stream_options_internal(self, stream_options):

        class _OutgoingFilter(object):
            def __init__(self, parent):
                self._parent = parent

            def filter(self, frame):
                self._parent._outgoing_filter(frame)

        class _IncomingFilter(object):
            def __init__(self, parent):
                self._parent = parent

            def filter(self, frame):
                self._parent._incoming_filter(frame)

        stream_options.outgoing_frame_filters.append(
            _OutgoingFilter(self))
        # Inserted at the front so it runs before any previously-added
        # incoming frame filters.
        stream_options.incoming_frame_filters.insert(
            0, _IncomingFilter(self))

    def set_response_window_bits(self, value):
        self._response_window_bits = value

    def set_response_no_context_takeover(self, value):
        self._response_no_context_takeover = value

    def set_bfinal(self, value):
        self._bfinal = value

    def enable_outgoing_compression(self):
        self._compress_outgoing = True

    def disable_outgoing_compression(self):
        self._compress_outgoing = False

    def _outgoing_filter(self, frame):
        """Transform outgoing frames. This method is called only by
        an _OutgoingFilter instance.
        """

        original_payload_size = len(frame.payload)
        self._outgoing_average_ratio_calculator.add_original_bytes(
            original_payload_size)

        # Control frames and frames sent while compression is disabled
        # pass through unmodified.
        if (not self._compress_outgoing or
            common.is_control_opcode(frame.opcode)):
            self._outgoing_average_ratio_calculator.add_result_bytes(
                original_payload_size)
            return

        frame.payload = self._rfc1979_deflater.filter(
            frame.payload, bfinal=self._bfinal)
        frame.rsv1 = 1

        filtered_payload_size = len(frame.payload)
        self._outgoing_average_ratio_calculator.add_result_bytes(
            filtered_payload_size)

        _log_outgoing_compression_ratio(
            self._logger,
            original_payload_size,
            filtered_payload_size,
            self._outgoing_average_ratio_calculator.get_average_ratio())

    def _incoming_filter(self, frame):
        """Transform incoming frames. This method is called only by
        an _IncomingFilter instance.
        """

        received_payload_size = len(frame.payload)
        self._incoming_average_ratio_calculator.add_result_bytes(
            received_payload_size)

        # Frames without the rsv1 bit and control frames are not compressed.
        if frame.rsv1 != 1 or common.is_control_opcode(frame.opcode):
            self._incoming_average_ratio_calculator.add_original_bytes(
                received_payload_size)
            return

        frame.payload = self._rfc1979_inflater.filter(frame.payload)
        frame.rsv1 = 0

        filtered_payload_size = len(frame.payload)
        self._incoming_average_ratio_calculator.add_original_bytes(
            filtered_payload_size)

        _log_incoming_compression_ratio(
            self._logger,
            received_payload_size,
            filtered_payload_size,
            self._incoming_average_ratio_calculator.get_average_ratio())
# deflate-frame and its x-webkit- prefixed alias share one processor
# implementation; both count as compression extensions.
_available_processors[common.DEFLATE_FRAME_EXTENSION] = (
    DeflateFrameExtensionProcessor)
_compression_extension_names.append(common.DEFLATE_FRAME_EXTENSION)

_available_processors[common.X_WEBKIT_DEFLATE_FRAME_EXTENSION] = (
    DeflateFrameExtensionProcessor)
_compression_extension_names.append(common.X_WEBKIT_DEFLATE_FRAME_EXTENSION)
def _parse_compression_method(data):
    """Parses the value of "method" extension parameter.

    Delegates to common.parse_extensions; callers treat a None return as
    a malformed parameter value.
    """

    return common.parse_extensions(data)
def _create_accepted_method_desc(method_name, method_params):
    """Creates accepted-method-desc from given method name and parameters"""

    extension = common.ExtensionParameter(method_name)
    # method_params is a sequence of (name, value) pairs.
    for param_name, param_value in method_params:
        extension.add_parameter(param_name, param_value)
    return common.format_extension(extension)
class CompressionExtensionProcessorBase(ExtensionProcessorInterface):
    """Base class for perframe-compress and permessage-compress extension."""

    _METHOD_PARAM = 'method'

    def __init__(self, request):
        ExtensionProcessorInterface.__init__(self, request)
        self._logger = util.get_class_logger(self)
        self._compression_method_name = None
        self._compression_processor = None
        self._compression_processor_hook = None

    def name(self):
        return ''

    def _lookup_compression_processor(self, method_desc):
        # Overridden by subclasses to map a method description to a
        # concrete processor; None means the method is unsupported.
        return None

    def _get_compression_processor_response(self):
        """Looks up the compression processor based on the self._request and
           returns the compression processor's response.
        """

        method_list = self._request.get_parameter_value(self._METHOD_PARAM)
        if method_list is None:
            return None
        methods = _parse_compression_method(method_list)
        if methods is None:
            return None
        # Bug fix: this local was previously misspelled
        # ("comression_processor"), so an empty methods list caused an
        # UnboundLocalError at the None-check below instead of declining
        # the extension.
        compression_processor = None
        # The current implementation tries only the first method that matches
        # supported algorithm. Following methods aren't tried even if the
        # first one is rejected.
        # TODO(bashi): Need to clarify this behavior.
        for method_desc in methods:
            compression_processor = self._lookup_compression_processor(
                method_desc)
            if compression_processor is not None:
                self._compression_method_name = method_desc.name()
                break
        if compression_processor is None:
            return None

        if self._compression_processor_hook:
            self._compression_processor_hook(compression_processor)

        processor_response = compression_processor.get_extension_response()
        if processor_response is None:
            return None
        self._compression_processor = compression_processor
        return processor_response

    def _get_extension_response_internal(self):
        processor_response = self._get_compression_processor_response()
        if processor_response is None:
            return None

        response = common.ExtensionParameter(self._request.name())
        accepted_method_desc = _create_accepted_method_desc(
            self._compression_method_name,
            processor_response.get_parameters())
        response.add_parameter(self._METHOD_PARAM, accepted_method_desc)
        self._logger.debug(
            'Enable %s extension (method: %s)' %
            (self._request.name(), self._compression_method_name))
        return response

    def _setup_stream_options_internal(self, stream_options):
        if self._compression_processor is None:
            return
        self._compression_processor.setup_stream_options(stream_options)

    def set_compression_processor_hook(self, hook):
        self._compression_processor_hook = hook

    def get_compression_processor(self):
        return self._compression_processor
class PerMessageDeflateExtensionProcessor(ExtensionProcessorInterface):
    """permessage-deflate extension processor. It's also used for
    permessage-compress extension when the deflate method is chosen.

    Specification:
    http://tools.ietf.org/html/draft-ietf-hybi-permessage-compression-08
    """

    _SERVER_MAX_WINDOW_BITS_PARAM = 'server_max_window_bits'
    _SERVER_NO_CONTEXT_TAKEOVER_PARAM = 'server_no_context_takeover'
    _CLIENT_MAX_WINDOW_BITS_PARAM = 'client_max_window_bits'
    _CLIENT_NO_CONTEXT_TAKEOVER_PARAM = 'client_no_context_takeover'

    def __init__(self, request, draft08=True):
        """Construct PerMessageDeflateExtensionProcessor

        Args:
            draft08: Follow the constraints on the parameters that were not
                specified for permessage-compress but are specified for
                permessage-deflate as on
                draft-ietf-hybi-permessage-compression-08.
        """

        ExtensionProcessorInterface.__init__(self, request)
        self._logger = util.get_class_logger(self)

        self._preferred_client_max_window_bits = None
        self._client_no_context_takeover = False

        # When False (permessage-compress mode), parameter validation is
        # relaxed; see _get_extension_response_internal.
        self._draft08 = draft08

    def name(self):
        # Returns the compression method name, not the extension name.
        return 'deflate'

    def _get_extension_response_internal(self):
        # In draft08 mode, any parameter outside the specified set makes
        # the whole offer invalid.
        if self._draft08:
            for name in self._request.get_parameter_names():
                if name not in [self._SERVER_MAX_WINDOW_BITS_PARAM,
                                self._SERVER_NO_CONTEXT_TAKEOVER_PARAM,
                                self._CLIENT_MAX_WINDOW_BITS_PARAM]:
                    self._logger.debug('Unknown parameter: %r', name)
                    return None
        else:
            # Any unknown parameter will be just ignored.
            pass

        server_max_window_bits = None
        if self._request.has_parameter(self._SERVER_MAX_WINDOW_BITS_PARAM):
            server_max_window_bits = self._request.get_parameter_value(
                self._SERVER_MAX_WINDOW_BITS_PARAM)
            try:
                server_max_window_bits = _parse_window_bits(
                    server_max_window_bits)
            except ValueError, e:
                self._logger.debug('Bad %s parameter: %r',
                                   self._SERVER_MAX_WINDOW_BITS_PARAM,
                                   e)
                return None

        # server_no_context_takeover must be a valueless parameter.
        server_no_context_takeover = self._request.has_parameter(
            self._SERVER_NO_CONTEXT_TAKEOVER_PARAM)
        if (server_no_context_takeover and
            self._request.get_parameter_value(
                self._SERVER_NO_CONTEXT_TAKEOVER_PARAM) is not None):
            self._logger.debug('%s parameter must not have a value: %r',
                               self._SERVER_NO_CONTEXT_TAKEOVER_PARAM,
                               server_no_context_takeover)
            return None

        # client_max_window_bits from a client indicates whether the client can
        # accept client_max_window_bits from a server or not.
        client_client_max_window_bits = self._request.has_parameter(
            self._CLIENT_MAX_WINDOW_BITS_PARAM)
        if (self._draft08 and
            client_client_max_window_bits and
            self._request.get_parameter_value(
                self._CLIENT_MAX_WINDOW_BITS_PARAM) is not None):
            self._logger.debug('%s parameter must not have a value in a '
                               'client\'s opening handshake: %r',
                               self._CLIENT_MAX_WINDOW_BITS_PARAM,
                               client_client_max_window_bits)
            return None

        self._rfc1979_deflater = util._RFC1979Deflater(
            server_max_window_bits, server_no_context_takeover)

        # Note that we prepare for incoming messages compressed with window
        # bits upto 15 regardless of the client_max_window_bits value to be
        # sent to the client.
        self._rfc1979_inflater = util._RFC1979Inflater()

        self._framer = _PerMessageDeflateFramer(
            server_max_window_bits, server_no_context_takeover)
        self._framer.set_bfinal(False)
        self._framer.set_compress_outgoing_enabled(True)

        response = common.ExtensionParameter(self._request.name())

        if server_max_window_bits is not None:
            response.add_parameter(
                self._SERVER_MAX_WINDOW_BITS_PARAM,
                str(server_max_window_bits))

        if server_no_context_takeover:
            response.add_parameter(
                self._SERVER_NO_CONTEXT_TAKEOVER_PARAM, None)

        if self._preferred_client_max_window_bits is not None:
            # In draft08 mode the client must have advertised support for
            # client_max_window_bits before we may send it.
            if self._draft08 and not client_client_max_window_bits:
                self._logger.debug('Processor is configured to use %s but '
                                   'the client cannot accept it',
                                   self._CLIENT_MAX_WINDOW_BITS_PARAM)
                return None
            response.add_parameter(
                self._CLIENT_MAX_WINDOW_BITS_PARAM,
                str(self._preferred_client_max_window_bits))

        if self._client_no_context_takeover:
            response.add_parameter(
                self._CLIENT_NO_CONTEXT_TAKEOVER_PARAM, None)

        self._logger.debug(
            'Enable %s extension ('
            'request: server_max_window_bits=%s; '
            'server_no_context_takeover=%r, '
            'response: client_max_window_bits=%s; '
            'client_no_context_takeover=%r)' %
            (self._request.name(),
             server_max_window_bits,
             server_no_context_takeover,
             self._preferred_client_max_window_bits,
             self._client_no_context_takeover))

        return response

    def _setup_stream_options_internal(self, stream_options):
        # All frame/message filtering is delegated to the framer created in
        # _get_extension_response_internal.
        self._framer.setup_stream_options(stream_options)

    def set_client_max_window_bits(self, value):
        """If this option is specified, this class adds the
        client_max_window_bits extension parameter to the handshake response,
        but doesn't reduce the LZ77 sliding window size of its inflater.
        I.e., you can use this for testing client implementation but cannot
        reduce memory usage of this class.

        If this method has been called with True and an offer without the
        client_max_window_bits extension parameter is received,

        - (When processing the permessage-deflate extension) this processor
          declines the request.

        - (When processing the permessage-compress extension) this processor
          accepts the request.
        """

        self._preferred_client_max_window_bits = value

    def set_client_no_context_takeover(self, value):
        """If this option is specified, this class adds the
        client_no_context_takeover extension parameter to the handshake
        response, but doesn't reset inflater for each message. I.e., you can
        use this for testing client implementation but cannot reduce memory
        usage of this class.
        """

        self._client_no_context_takeover = value

    def set_bfinal(self, value):
        self._framer.set_bfinal(value)

    def enable_outgoing_compression(self):
        self._framer.set_compress_outgoing_enabled(True)

    def disable_outgoing_compression(self):
        self._framer.set_compress_outgoing_enabled(False)
class _PerMessageDeflateFramer(object):
    """A framer for extensions with per-message DEFLATE feature."""

    def __init__(self, deflate_max_window_bits, deflate_no_context_takeover):
        self._logger = util.get_class_logger(self)

        self._rfc1979_deflater = util._RFC1979Deflater(
            deflate_max_window_bits, deflate_no_context_takeover)

        self._rfc1979_inflater = util._RFC1979Inflater()

        self._bfinal = False

        self._compress_outgoing_enabled = False

        # True if a message is fragmented and compression is ongoing.
        self._compress_ongoing = False

        # Calculates
        #     (Total outgoing bytes supplied to this filter) /
        #     (Total bytes sent to the network after applying this filter)
        self._outgoing_average_ratio_calculator = _AverageRatioCalculator()

        # Calculates
        #     (Total bytes received from the network) /
        #     (Total incoming bytes obtained after applying this filter)
        self._incoming_average_ratio_calculator = _AverageRatioCalculator()

    def set_bfinal(self, value):
        self._bfinal = value

    def set_compress_outgoing_enabled(self, value):
        self._compress_outgoing_enabled = value

    def _process_incoming_message(self, message, decompress):
        # Only messages flagged by _process_incoming_frame (rsv1 set on the
        # first frame) are decompressed.
        if not decompress:
            return message

        received_payload_size = len(message)
        self._incoming_average_ratio_calculator.add_result_bytes(
            received_payload_size)

        message = self._rfc1979_inflater.filter(message)

        filtered_payload_size = len(message)
        self._incoming_average_ratio_calculator.add_original_bytes(
            filtered_payload_size)

        _log_incoming_compression_ratio(
            self._logger,
            received_payload_size,
            filtered_payload_size,
            self._incoming_average_ratio_calculator.get_average_ratio())

        return message

    def _process_outgoing_message(self, message, end, binary):
        # Text messages are encoded here because this framer disables the
        # stream's own UTF-8 encoding (encode_text_message_to_utf8 = False
        # in setup_stream_options).
        if not binary:
            message = message.encode('utf-8')

        if not self._compress_outgoing_enabled:
            return message

        original_payload_size = len(message)
        self._outgoing_average_ratio_calculator.add_original_bytes(
            original_payload_size)

        message = self._rfc1979_deflater.filter(
            message, end=end, bfinal=self._bfinal)

        filtered_payload_size = len(message)
        self._outgoing_average_ratio_calculator.add_result_bytes(
            filtered_payload_size)

        _log_outgoing_compression_ratio(
            self._logger,
            original_payload_size,
            filtered_payload_size,
            self._outgoing_average_ratio_calculator.get_average_ratio())

        # Only the first frame of a compressed message carries the rsv1 bit;
        # _compress_ongoing tracks message fragmentation across frames.
        if not self._compress_ongoing:
            self._outgoing_frame_filter.set_compression_bit()
        self._compress_ongoing = not end
        return message

    def _process_incoming_frame(self, frame):
        # rsv1 on a non-control frame marks the start of a compressed
        # message; clear it so later checks see a plain frame.
        if frame.rsv1 == 1 and not common.is_control_opcode(frame.opcode):
            self._incoming_message_filter.decompress_next_message()
            frame.rsv1 = 0

    def _process_outgoing_frame(self, frame, compression_bit):
        if (not compression_bit or
            common.is_control_opcode(frame.opcode)):
            return

        frame.rsv1 = 1

    def setup_stream_options(self, stream_options):
        """Creates filters and sets them to the StreamOptions."""

        class _OutgoingMessageFilter(object):

            def __init__(self, parent):
                self._parent = parent

            def filter(self, message, end=True, binary=False):
                return self._parent._process_outgoing_message(
                    message, end, binary)

        class _IncomingMessageFilter(object):

            def __init__(self, parent):
                self._parent = parent
                self._decompress_next_message = False

            def decompress_next_message(self):
                self._decompress_next_message = True

            def filter(self, message):
                message = self._parent._process_incoming_message(
                    message, self._decompress_next_message)
                self._decompress_next_message = False
                return message

        self._outgoing_message_filter = _OutgoingMessageFilter(self)
        self._incoming_message_filter = _IncomingMessageFilter(self)
        stream_options.outgoing_message_filters.append(
            self._outgoing_message_filter)
        stream_options.incoming_message_filters.append(
            self._incoming_message_filter)

        class _OutgoingFrameFilter(object):

            def __init__(self, parent):
                self._parent = parent
                self._set_compression_bit = False

            def set_compression_bit(self):
                self._set_compression_bit = True

            def filter(self, frame):
                self._parent._process_outgoing_frame(
                    frame, self._set_compression_bit)
                self._set_compression_bit = False

        class _IncomingFrameFilter(object):

            def __init__(self, parent):
                self._parent = parent

            def filter(self, frame):
                self._parent._process_incoming_frame(frame)

        self._outgoing_frame_filter = _OutgoingFrameFilter(self)
        self._incoming_frame_filter = _IncomingFrameFilter(self)
        stream_options.outgoing_frame_filters.append(
            self._outgoing_frame_filter)
        stream_options.incoming_frame_filters.append(
            self._incoming_frame_filter)

        # Messages are encoded to UTF-8 by _process_outgoing_message instead
        # of by the stream, so compression sees the raw bytes.
        stream_options.encode_text_message_to_utf8 = False
_available_processors[common.PERMESSAGE_DEFLATE_EXTENSION] = (
    PerMessageDeflateExtensionProcessor)
# TODO(tyoshino): Reorganize class names.
# Registered under the bare method name, matching
# PerMessageDeflateExtensionProcessor.name().
_compression_extension_names.append('deflate')
class PerMessageCompressExtensionProcessor(
    CompressionExtensionProcessorBase):
    """permessage-compress extension processor.

    Specification:
    http://tools.ietf.org/html/draft-ietf-hybi-permessage-compression
    """

    _DEFLATE_METHOD = 'deflate'

    def __init__(self, request):
        CompressionExtensionProcessorBase.__init__(self, request)

    def name(self):
        return common.PERMESSAGE_COMPRESSION_EXTENSION

    def _lookup_compression_processor(self, method_desc):
        # Only the deflate method is supported; draft08=False relaxes the
        # parameter constraints that apply to permessage-deflate proper.
        if method_desc.name() != self._DEFLATE_METHOD:
            return None
        return PerMessageDeflateExtensionProcessor(method_desc, False)
# Register permessage-compress and mark it as a compression extension.
_available_processors[common.PERMESSAGE_COMPRESSION_EXTENSION] = (
    PerMessageCompressExtensionProcessor)
_compression_extension_names.append(common.PERMESSAGE_COMPRESSION_EXTENSION)
class MuxExtensionProcessor(ExtensionProcessorInterface):
    """WebSocket multiplexing extension processor."""

    _QUOTA_PARAM = 'quota'

    def __init__(self, request):
        ExtensionProcessorInterface.__init__(self, request)
        self._quota = 0
        self._extensions = []

    def name(self):
        return common.MUX_EXTENSION

    def check_consistency_with_other_processors(self, processors):
        # Deactivates this processor when mux is combined with frame-level
        # or compression extensions in an unsupported order.
        before_mux = True
        for processor in processors:
            name = processor.name()
            if name == self.name():
                before_mux = False
                continue
            if not processor.is_active():
                continue
            if before_mux:
                # Mux extension cannot be used after extensions
                # that depend on frame boundary, extension data field, or any
                # reserved bits which are attributed to each frame.
                if (name == common.DEFLATE_FRAME_EXTENSION or
                    name == common.X_WEBKIT_DEFLATE_FRAME_EXTENSION):
                    self.set_active(False)
                    return
            else:
                # Mux extension should not be applied before any history-based
                # compression extension.
                if (name == common.DEFLATE_FRAME_EXTENSION or
                    name == common.X_WEBKIT_DEFLATE_FRAME_EXTENSION or
                    name == common.PERMESSAGE_COMPRESSION_EXTENSION or
                    name == common.X_WEBKIT_PERMESSAGE_COMPRESSION_EXTENSION):
                    self.set_active(False)
                    return

    def _get_extension_response_internal(self):
        self._active = False
        quota = self._request.get_parameter_value(self._QUOTA_PARAM)
        if quota is not None:
            try:
                quota = int(quota)
            except ValueError:
                # Fix: was "except ValueError, e" (Python-2-only syntax)
                # binding an exception instance that was never used.
                return None
            # quota must fit in an unsigned 32-bit value.
            if quota < 0 or quota >= 2 ** 32:
                return None
            self._quota = quota

        self._active = True
        return common.ExtensionParameter(common.MUX_EXTENSION)

    def _setup_stream_options_internal(self, stream_options):
        pass

    def set_quota(self, quota):
        self._quota = quota

    def quota(self):
        return self._quota

    def set_extensions(self, extensions):
        self._extensions = extensions

    def extensions(self):
        return self._extensions
# mux is not a compression extension, so only the processor map is updated.
_available_processors[common.MUX_EXTENSION] = MuxExtensionProcessor
def get_extension_processor(extension_request):
    """Given an ExtensionParameter representing an extension offer received
    from a client, configures and returns an instance of the corresponding
    extension processor class.
    """

    try:
        processor_class = _available_processors[extension_request.name()]
    except KeyError:
        # Unknown extension name: no processor is available.
        return None
    return processor_class(extension_request)
def is_compression_extension(extension_name):
    """Return True iff extension_name is one of the registered
    compression extensions."""
    return extension_name in _compression_extension_names
# vi:sts=4 sw=4 et
| mpl-2.0 |
disqus/overseer | overseer/templatetags/overseer_helpers.py | 2 | 1722 | """
overseer.templatetags.overseer_helpers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2011 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
import datetime
from django import template
from django.template.defaultfilters import stringfilter
register = template.Library()
@register.filter
def timesince(value):
    """Humanized "time ago" template filter with special cases for empty,
    very old, very recent, and day-old datetimes."""
    from django.template.defaultfilters import timesince as dj_timesince
    if not value:
        return 'Never'
    # Anything older than five days is rendered as a plain date.
    if value < datetime.datetime.now() - datetime.timedelta(days=5):
        return value.date()
    # Keep only the most significant unit, e.g. "2 days, 3 hours" -> "2 days".
    phrase = ' '.join(dj_timesince(value).split(' ')[0:2]).strip(',')
    if phrase == '0 minutes':
        return 'Just now'
    if phrase == '1 day':
        return 'Yesterday'
    return phrase + ' ago'
@register.filter(name='truncatechars')
@stringfilter
def truncatechars(value, arg):
    """
    Truncates a string after a certain number of chars.

    Argument: Number of chars to truncate after.
    """
    try:
        limit = int(arg)
    except ValueError:  # Invalid literal for int().
        return value  # Fail silently.
    if len(value) <= limit:
        return value
    return value[:limit] + '...'
truncatechars.is_safe = True
@register.filter
def duration(value):
    """Render a duration (seconds or timedelta) using its largest unit."""
    if isinstance(value, datetime.timedelta):
        value = value.days * 24 * 3600 + value.seconds

    hours = 0
    minutes = 0
    # NOTE: boundaries are strict (>), so exactly 3600 falls through to
    # minutes and exactly 60 to seconds, matching the original behavior.
    if value > 3600:
        hours = value / 3600
        value = value % 3600
    if value > 60:
        minutes = value / 60
        value = value % 60
    seconds = value

    # Only the most significant non-zero unit is reported.
    if hours:
        return '%s hours' % (hours,)
    if minutes:
        return '%s minutes' % (minutes,)
    if seconds:
        return '%s seconds' % (seconds,)
    return 'n/a'
magul/volontulo | backend/apps/volontulo/tests/views/api/organizations/test_read.py | 3 | 3698 | """
.. module:: test_read
"""
from rest_framework import status
from rest_framework.test import APITestCase
from apps.volontulo.factories import OrganizationFactory
from apps.volontulo.factories import UserFactory
class _TestOrganizationsReadAPIView(APITestCase):

    """Tests for REST API's read organization view."""

    def _test_organization_read_fields(self, organization):
        """Test read's fields of organizations REST API endpoint.

        Pops every expected field and then verifies no unexpected field
        remains in the serialized payload.
        """
        self.assertIsInstance(organization.pop('address'), str)
        self.assertIsInstance(organization.pop('description'), str)
        self.assertIsInstance(organization.pop('id'), int)
        self.assertIsInstance(organization.pop('name'), str)
        self.assertIsInstance(organization.pop('slug'), str)
        self.assertIsInstance(organization.pop('url'), str)
        self.assertEqual(len(organization), 0)
class TestAdminUserOrganizationsReadAPIView(_TestOrganizationsReadAPIView):

    """Tests for REST API's read organization view for admin user."""

    def setUp(self):
        """Set up each test."""
        super().setUp()
        # Authenticate as an administrator for the duration of the test.
        self.client.force_login(UserFactory(
            userprofile__is_administrator=True
        ))

    def test_organization_read_status(self):
        """Test organization's read status for admin user.

        Organizations are readable for everyone.
        """
        response = self.client.get(
            '/api/organizations/{id}/'.format(id=OrganizationFactory().id))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self._test_organization_read_fields(response.data)
class TestOrganizationUserOrganizationsReadAPIView(
        _TestOrganizationsReadAPIView):

    """Tests for API's read organization view for user with organization."""

    def setUp(self):
        """Set up each test."""
        super().setUp()
        # Authenticate as a user who belongs to some (other) organization.
        self.client.force_login(UserFactory(
            userprofile__organizations=[OrganizationFactory()]
        ))

    def test_organization_read_status(self):
        """Test organization's read status for user with organization.

        Organizations are readable for everyone.
        """
        response = self.client.get(
            '/api/organizations/{id}/'.format(id=OrganizationFactory().id))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self._test_organization_read_fields(response.data)
class TestRegularUserOrganizationsReadAPIView(_TestOrganizationsReadAPIView):

    """Tests for REST API's read organization view for regular user."""

    def setUp(self):
        """Set up each test."""
        super().setUp()
        self.client.force_login(UserFactory())

    def test_organization_read_status(self):
        """Test organization's read status for regular user.

        Organizations are readable for everyone.
        """
        # Fix: a redundant second force_login (creating a throwaway user)
        # was removed; setUp already authenticates, matching the sibling
        # test classes.
        response = self.client.get(
            '/api/organizations/{id}/'.format(id=OrganizationFactory().id))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self._test_organization_read_fields(response.data)
class TestAnonymousUserOrganizationsReadAPIView(_TestOrganizationsReadAPIView):

    """Tests for REST API's read organization view for anonymous user."""

    def test_organization_read_status(self):
        """Test organization's read status for anonymous user.

        Organizations are readable for everyone.
        """
        # No login: the request is made as an anonymous client.
        response = self.client.get(
            '/api/organizations/{id}/'.format(id=OrganizationFactory().id))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self._test_organization_read_fields(response.data)
| mit |
NcLang/vimrc | sources_non_forked/YouCompleteMe/third_party/ycmd/third_party/python-future/src/future/moves/urllib/request.py | 70 | 3525 | from __future__ import absolute_import
from future.standard_library import suspend_hooks
from future.utils import PY3
if PY3:
from urllib.request import *
    # These aren't in __all__:
from urllib.request import (getproxies,
pathname2url,
proxy_bypass,
quote,
request_host,
splitattr,
splithost,
splitpasswd,
splitport,
splitquery,
splittag,
splittype,
splituser,
splitvalue,
thishost,
to_bytes,
unquote,
unwrap,
url2pathname,
urlcleanup,
urljoin,
urlopen,
urlparse,
urlretrieve,
urlsplit,
urlunparse)
else:
__future_module__ = True
with suspend_hooks():
from urllib import *
from urllib2 import *
from urlparse import *
# Rename:
from urllib import toBytes # missing from __all__ on Py2.6
to_bytes = toBytes
# from urllib import (pathname2url,
# url2pathname,
# getproxies,
# urlretrieve,
# urlcleanup,
# URLopener,
# FancyURLopener,
# proxy_bypass)
# from urllib2 import (
# AbstractBasicAuthHandler,
# AbstractDigestAuthHandler,
# BaseHandler,
# CacheFTPHandler,
# FileHandler,
# FTPHandler,
# HTTPBasicAuthHandler,
# HTTPCookieProcessor,
# HTTPDefaultErrorHandler,
# HTTPDigestAuthHandler,
# HTTPErrorProcessor,
# HTTPHandler,
# HTTPPasswordMgr,
# HTTPPasswordMgrWithDefaultRealm,
# HTTPRedirectHandler,
# HTTPSHandler,
# URLError,
# build_opener,
# install_opener,
# OpenerDirector,
# ProxyBasicAuthHandler,
# ProxyDigestAuthHandler,
# ProxyHandler,
# Request,
# UnknownHandler,
# urlopen,
# )
# from urlparse import (
# urldefrag
# urljoin,
# urlparse,
# urlunparse,
# urlsplit,
# urlunsplit,
# parse_qs,
    #                      parse_qsl,
# )
| mit |
Schevo/kiwi | kiwi/db/sqlobj.py | 2 | 9074 | # -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2007 Async Open Source
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
##
## Author(s): Johan Dahlin <jdahlin@async.com.br>
##
"""
SQLObject integration for Kiwi
"""
from sqlobject.sqlbuilder import func, AND, OR, LIKE, SQLExpression, NOT
from kiwi.db.query import NumberQueryState, StringQueryState, \
DateQueryState, DateIntervalQueryState, QueryExecuter, \
NumberIntervalQueryState
from kiwi.interfaces import ISearchFilter
class _FTI(SQLExpression):
    # Wraps a raw full-text-index query string so it can be embedded
    # verbatim in an SQLBuilder expression.
    def __init__(self, q):
        self.q = q

    def __sqlrepr__(self, db):
        return self.q
class SQLObjectQueryExecuter(QueryExecuter):
    def __init__(self, conn=None):
        QueryExecuter.__init__(self)
        self.conn = conn
        self.table = None
        # Callbacks invoked for every search, regardless of filter.
        self._query_callbacks = []
        # Maps search filter -> list of callbacks for that filter.
        self._filter_query_callbacks = {}
        self._query = self._default_query
        # Cache: table name + field name -> whether a full text index exists.
        self._full_text_indexes = {}
#
# Public API
#
    def set_table(self, table):
        """
        Sets the SQLObject table/object for this executer.
        @param table: a SQLObject subclass
        """
        self.table = table
    def add_query_callback(self, callback):
        """
        Adds a generic query callback
        @param callback: a callable
        @raises TypeError: if callback is not callable
        """
        if not callable(callback):
            raise TypeError
        self._query_callbacks.append(callback)
    def add_filter_query_callback(self, search_filter, callback):
        """
        Adds a query callback for the filter search_filter
        @param search_filter: a search filter (must provide ISearchFilter)
        @param callback: a callable
        @raises TypeError: if search_filter does not provide ISearchFilter
            or callback is not callable
        """
        if not ISearchFilter.providedBy(search_filter):
            raise TypeError
        if not callable(callback):
            raise TypeError
        # Multiple callbacks may be registered for the same filter.
        l = self._filter_query_callbacks.setdefault(search_filter, [])
        l.append(callback)
    def set_query(self, callback):
        """
        Overrides the default query mechanism.

        @param callback: a callable which will take three arguments:
            (query, having, connection) — see how search() invokes
            self._query. (The previous docstring incorrectly said two
            arguments: (query, connection).)
        """
        if callback is None:
            callback = self._default_query
        elif not callable(callback):
            raise TypeError
        self._query = callback
#
# QueryBuilder
#
    def search(self, states):
        """
        Execute a search.
        @param states: a sequence of query states (each carries a .filter)
        @returns: the (limited) select results
        @raises ValueError: if no table was set, or a filter has neither a
            search column nor a query callback
        """
        if self.table is None:
            raise ValueError("table cannot be None")
        table = self.table
        queries = []
        # Clauses on aggregate columns go into HAVING instead of WHERE;
        # collected via _add_having() during _construct_state_query().
        self._having = []
        for state in states:
            search_filter = state.filter
            assert state.filter

            # Column query
            if search_filter in self._columns:
                query = self._construct_state_query(
                    table, state, self._columns[search_filter])
                if query:
                    queries.append(query)
            # Custom per filter/state query.
            elif search_filter in self._filter_query_callbacks:
                for callback in self._filter_query_callbacks[search_filter]:
                    query = callback(state)
                    if query:
                        queries.append(query)
            else:
                if (self._query == self._default_query and
                    not self._query_callbacks):
                    raise ValueError(
                        "You need to add a search column or a query callback "
                        "for filter %s" % (search_filter))

        # Generic callbacks receive the full state list.
        for callback in self._query_callbacks:
            query = callback(states)
            if query:
                queries.append(query)

        if queries:
            query = AND(*queries)
        else:
            query = None

        having = None
        if self._having:
            having = AND(self._having)

        result = self._query(query, having, self.conn)
        return result.limit(self.get_limit())
#
# Private
#
def _add_having(self, clause):
self._having.append(clause)
def _default_query(self, query, having, conn):
return self.table.select(query, having=having, connection=conn)
    def _construct_state_query(self, table, state, columns):
        """Translate one filter state into WHERE/HAVING clauses.

        Each column mapped to the state's filter contributes one clause;
        the per-column clauses are OR-ed together. Clauses on aggregate
        columns are routed to the HAVING list (via _add_having) instead
        of being returned.

        @param table: the table whose .q namespace holds the columns
        @param state: the filter state to translate
        @param columns: column names mapped to the state's filter
        @returns: an OR of the per-column clauses, or None if no column
            produced a WHERE clause
        """
        queries = []
        having_queries = []
        for column in columns:
            query = None
            table_field = getattr(table.q, column)
            # If the field has an aggregate function (sum, avg, etc..), then
            # this clause should be in the HAVING part of the query.
            use_having = table_field.hasSQLCall()
            if isinstance(state, NumberQueryState):
                query = self._parse_number_state(state, table_field)
            elif isinstance(state, NumberIntervalQueryState):
                query = self._parse_number_interval_state(state, table_field)
            elif isinstance(state, StringQueryState):
                query = self._parse_string_state(state, table_field)
            elif isinstance(state, DateQueryState):
                query = self._parse_date_state(state, table_field)
            elif isinstance(state, DateIntervalQueryState):
                query = self._parse_date_interval_state(state, table_field)
            else:
                # Unknown state type: nothing sensible can be built.
                raise NotImplementedError(state.__class__.__name__)
            if query and use_having:
                having_queries.append(query)
                query = None
            if query:
                queries.append(query)
        if having_queries:
            self._add_having(OR(*having_queries))
        if queries:
            return OR(*queries)
    def _postgres_has_fti_index(self, table_name, column_name):
        """Return True if *column_name* is a tsvector column of
        *table_name*, i.e. a PostgreSQL full text index column.

        Both names are escaped through the connection's sqlrepr before
        interpolation, so the query is safe against injection.
        """
        # Assume that the PostgreSQL full text index columns are
        # named xxx_fti where xxx is the name of the column
        res = self.conn.queryOne(
            """SELECT 1
            FROM information_schema.columns
            WHERE table_name = %s AND
                  column_name = %s AND
                  udt_name = 'tsvector';""" % (
            self.conn.sqlrepr(table_name),
            self.conn.sqlrepr(column_name)))
        return bool(res)
def _check_has_fulltext_index(self, table_name, field_name):
fullname = table_name + field_name
if fullname in self._full_text_indexes:
return self._full_text_indexes[fullname]
else:
value = False
if 'postgres' in self.conn.__class__.__module__:
value = self._postgres_has_fti_index(table_name,
field_name + '_fti')
self._full_text_indexes[fullname] = value
return value
def _parse_number_state(self, state, table_field):
if state.value is not None:
return table_field == state.value
def _parse_number_interval_state(self, state, table_field):
queries = []
if state.start is not None:
queries.append(table_field >= state.start)
if state.end is not None:
queries.append(table_field <= state.end)
if queries:
return AND(*queries)
    def _parse_string_state(self, state, table_field):
        """Build a (possibly negated) text-match clause for *state*.

        Uses a PostgreSQL full text search query when the column has an
        FTI index, otherwise falls back to a case-insensitive LIKE.
        Returns None when the state carries no text.
        """
        if not state.text:
            return
        if self._check_has_fulltext_index(table_field.tableName,
                                          table_field.fieldName):
            value = state.text.lower()
            # FTI operators:
            #  & = AND
            #  | = OR
            # Every whitespace-separated word must match.
            value = value.replace(' ', ' & ')
            retval = _FTI("%s.%s_fti @@ %s::tsquery" % (
                table_field.tableName,
                table_field.fieldName,
                self.conn.sqlrepr(value)))
        else:
            # Substring match, case-insensitive on both sides.
            text = '%%%s%%' % state.text.lower()
            retval = LIKE(func.LOWER(table_field), text)
        if state.mode == StringQueryState.NOT_CONTAINS:
            retval = NOT(retval)
        return retval
def _parse_date_state(self, state, table_field):
if state.date:
return func.DATE(table_field) == state.date
def _parse_date_interval_state(self, state, table_field):
queries = []
if state.start:
queries.append(table_field >= state.start)
if state.end:
queries.append(func.DATE(table_field) <= state.end)
if queries:
return AND(*queries)
| lgpl-2.1 |
dhanunjaya/neutron | neutron/tests/unit/services/metering/test_metering_plugin.py | 34 | 20852 | # Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import uuidutils
from neutron.api.v2 import attributes as attr
from neutron import context
from neutron.db import agents_db
from neutron.db import l3_agentschedulers_db
from neutron.db.metering import metering_rpc
from neutron.extensions import l3 as ext_l3
from neutron.extensions import metering as ext_metering
from neutron import manager
from neutron.plugins.common import constants
from neutron.tests.common import helpers
from neutron.tests import tools
from neutron.tests.unit.db.metering import test_metering_db
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit.extensions import test_l3
# Shorthand for generating fresh UUIDs in the tests below.
_uuid = uuidutils.generate_uuid

# Dotted path of the metering service plugin under test.
METERING_SERVICE_PLUGIN_KLASS = (
    "neutron.services.metering."
    "metering_plugin.MeteringPlugin"
)
class MeteringTestExtensionManager(object):
    """Minimal extension manager exposing the L3 and metering extensions."""

    def get_resources(self):
        # Register both extensions' attribute maps before asking each
        # extension for its resources.
        for extension in (ext_metering, ext_l3):
            attr.RESOURCE_ATTRIBUTE_MAP.update(extension.RESOURCE_ATTRIBUTE_MAP)
        return (ext_l3.L3.get_resources() +
                ext_metering.Metering.get_resources())

    def get_actions(self):
        return []

    def get_request_extensions(self):
        return []
class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
                         test_l3.L3NatTestCaseMixin,
                         test_metering_db.MeteringPluginDbTestCaseMixin):
    """Checks that CRUD on metering labels and rules triggers the
    expected agent RPC notifications."""

    resource_prefix_map = dict(
        (k.replace('_', '-'), "/metering")
        for k in ext_metering.RESOURCE_ATTRIBUTE_MAP.keys()
    )

    def setUp(self):
        # Core L3 plugin plus the metering service plugin under test.
        plugin = 'neutron.tests.unit.extensions.test_l3.TestL3NatIntPlugin'
        service_plugins = {'metering_plugin_name':
                           METERING_SERVICE_PLUGIN_KLASS}
        ext_mgr = MeteringTestExtensionManager()
        super(TestMeteringPlugin, self).setUp(plugin=plugin, ext_mgr=ext_mgr,
                                              service_plugins=service_plugins)

        # Freeze UUID generation so expected payloads can use literal ids.
        self.uuid = '654f6b9d-0f36-4ae5-bd1b-01616794ca60'

        uuid = 'oslo_utils.uuidutils.generate_uuid'
        self.uuid_patch = mock.patch(uuid, return_value=self.uuid)
        self.mock_uuid = self.uuid_patch.start()

        # All requests run under one fixed admin context.
        self.tenant_id = 'a7e61382-47b8-4d40-bae3-f95981b5637b'
        self.ctx = context.Context('', self.tenant_id, is_admin=True)
        self.context_patch = mock.patch('neutron.context.Context',
                                        return_value=self.ctx)
        self.mock_context = self.context_patch.start()

        self.topic = 'metering_agent'

        # Patch each agent notification so tests can assert on the calls.
        add = ('neutron.api.rpc.agentnotifiers.' +
               'metering_rpc_agent_api.MeteringAgentNotifyAPI' +
               '.add_metering_label')
        self.add_patch = mock.patch(add)
        self.mock_add = self.add_patch.start()

        remove = ('neutron.api.rpc.agentnotifiers.' +
                  'metering_rpc_agent_api.MeteringAgentNotifyAPI' +
                  '.remove_metering_label')
        self.remove_patch = mock.patch(remove)
        self.mock_remove = self.remove_patch.start()

        update = ('neutron.api.rpc.agentnotifiers.' +
                  'metering_rpc_agent_api.MeteringAgentNotifyAPI' +
                  '.update_metering_label_rules')
        self.update_patch = mock.patch(update)
        self.mock_update = self.update_patch.start()

        add_rule = ('neutron.api.rpc.agentnotifiers.' +
                    'metering_rpc_agent_api.MeteringAgentNotifyAPI' +
                    '.add_metering_label_rule')
        self.add_rule_patch = mock.patch(add_rule)
        self.mock_add_rule = self.add_rule_patch.start()

        remove_rule = ('neutron.api.rpc.agentnotifiers.' +
                       'metering_rpc_agent_api.MeteringAgentNotifyAPI' +
                       '.remove_metering_label_rule')
        self.remove_rule_patch = mock.patch(remove_rule)
        self.mock_remove_rule = self.remove_rule_patch.start()

    def test_add_metering_label_rpc_call(self):
        """A new label is only announced for the owning tenant's router."""
        second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
        expected = [{'status': 'ACTIVE',
                     'name': 'router1',
                     'gw_port_id': None,
                     'admin_state_up': True,
                     'tenant_id': self.tenant_id,
                     '_metering_labels': [
                         {'rules': [],
                          'id': self.uuid}],
                     'id': self.uuid}]

        tenant_id_2 = '8a268a58-1610-4890-87e0-07abb8231206'
        # router2 belongs to another tenant and must not appear above.
        self.mock_uuid.return_value = second_uuid
        with self.router(name='router2', tenant_id=tenant_id_2,
                         set_context=True):
            self.mock_uuid.return_value = self.uuid
            with self.router(name='router1', tenant_id=self.tenant_id,
                             set_context=True):
                with self.metering_label(tenant_id=self.tenant_id,
                                         set_context=True):
                    self.mock_add.assert_called_with(self.ctx, expected)

    def test_add_metering_label_shared_rpc_call(self):
        """A shared label from another tenant attaches to the router too."""
        second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
        expected = [{'status': 'ACTIVE',
                     'name': 'router1',
                     'gw_port_id': None,
                     'admin_state_up': True,
                     'tenant_id': self.tenant_id,
                     '_metering_labels': [
                         {'rules': [],
                          'id': self.uuid},
                         {'rules': [],
                          'id': second_uuid}],
                     'id': self.uuid}]

        tenant_id_2 = '8a268a58-1610-4890-87e0-07abb8231206'
        with self.router(name='router1', tenant_id=self.tenant_id,
                         set_context=True):
            with self.metering_label(tenant_id=self.tenant_id,
                                     set_context=True):
                self.mock_uuid.return_value = second_uuid
                with self.metering_label(tenant_id=tenant_id_2, shared=True,
                                         set_context=True):
                    self.mock_add.assert_called_with(self.ctx, expected)

    def test_remove_metering_label_rpc_call(self):
        """Deleting a label notifies removal with the same payload."""
        expected = [{'status': 'ACTIVE',
                     'name': 'router1',
                     'gw_port_id': None,
                     'admin_state_up': True,
                     'tenant_id': self.tenant_id,
                     '_metering_labels': [
                         {'rules': [],
                          'id': self.uuid}],
                     'id': self.uuid}]

        with self.router(tenant_id=self.tenant_id, set_context=True):
            with self.metering_label(tenant_id=self.tenant_id,
                                     set_context=True) as label:
                self.mock_add.assert_called_with(self.ctx, expected)
                self._delete('metering-labels',
                             label['metering_label']['id'])
            self.mock_remove.assert_called_with(self.ctx, expected)

    def test_remove_one_metering_label_rpc_call(self):
        """Removing one of two labels only drops that label."""
        second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
        expected_add = [{'status': 'ACTIVE',
                         'name': 'router1',
                         'gw_port_id': None,
                         'admin_state_up': True,
                         'tenant_id': self.tenant_id,
                         '_metering_labels': [
                             {'rules': [],
                              'id': self.uuid},
                             {'rules': [],
                              'id': second_uuid}],
                         'id': self.uuid}]
        expected_remove = [{'status': 'ACTIVE',
                            'name': 'router1',
                            'gw_port_id': None,
                            'admin_state_up': True,
                            'tenant_id': self.tenant_id,
                            '_metering_labels': [
                                {'rules': [],
                                 'id': second_uuid}],
                            'id': self.uuid}]

        with self.router(tenant_id=self.tenant_id, set_context=True):
            with self.metering_label(tenant_id=self.tenant_id,
                                     set_context=True):
                self.mock_uuid.return_value = second_uuid
                with self.metering_label(tenant_id=self.tenant_id,
                                         set_context=True) as label:
                    self.mock_add.assert_called_with(self.ctx, expected_add)
                    self._delete('metering-labels',
                                 label['metering_label']['id'])
                self.mock_remove.assert_called_with(self.ctx, expected_remove)

    def test_add_and_remove_metering_label_rule_rpc_call(self):
        """Rule creation/deletion notifies the agent with the rule body."""
        second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
        expected_add = [{'status': 'ACTIVE',
                         'name': 'router1',
                         'gw_port_id': None,
                         'admin_state_up': True,
                         'tenant_id': self.tenant_id,
                         '_metering_labels': [
                             {'rule': {
                                 'remote_ip_prefix': '10.0.0.0/24',
                                 'direction': 'ingress',
                                 'metering_label_id': self.uuid,
                                 'excluded': False,
                                 'id': second_uuid},
                              'id': self.uuid}],
                         'id': self.uuid}]
        expected_del = [{'status': 'ACTIVE',
                         'name': 'router1',
                         'gw_port_id': None,
                         'admin_state_up': True,
                         'tenant_id': self.tenant_id,
                         '_metering_labels': [
                             {'rule': {
                                 'remote_ip_prefix': '10.0.0.0/24',
                                 'direction': 'ingress',
                                 'metering_label_id': self.uuid,
                                 'excluded': False,
                                 'id': second_uuid},
                              'id': self.uuid}],
                         'id': self.uuid}]

        with self.router(tenant_id=self.tenant_id, set_context=True):
            with self.metering_label(tenant_id=self.tenant_id,
                                     set_context=True) as label:
                l = label['metering_label']
                self.mock_uuid.return_value = second_uuid
                with self.metering_label_rule(l['id']):
                    self.mock_add_rule.assert_called_with(self.ctx,
                                                          expected_add)
                    self._delete('metering-label-rules', second_uuid)
                self.mock_remove_rule.assert_called_with(self.ctx,
                                                         expected_del)

    def test_delete_metering_label_does_not_clear_router_tenant_id(self):
        """Regression check: label deletion must not touch the router."""
        tenant_id = '654f6b9d-0f36-4ae5-bd1b-01616794ca60'
        with self.metering_label(tenant_id=tenant_id) as metering_label:
            with self.router(tenant_id=tenant_id, set_context=True) as r:
                router = self._show('routers', r['router']['id'])
                self.assertEqual(tenant_id, router['router']['tenant_id'])
                metering_label_id = metering_label['metering_label']['id']
                self._delete('metering-labels', metering_label_id, 204)
                router = self._show('routers', r['router']['id'])
                self.assertEqual(tenant_id, router['router']['tenant_id'])
class TestMeteringPluginL3AgentScheduler(
        l3_agentschedulers_db.L3AgentSchedulerDbMixin,
        test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
        test_l3.L3NatTestCaseMixin,
        test_metering_db.MeteringPluginDbTestCaseMixin):
    """Checks notifications when routers are scheduled to L3 agents."""

    resource_prefix_map = dict(
        (k.replace('_', '-'), "/metering")
        for k in ext_metering.RESOURCE_ATTRIBUTE_MAP.keys()
    )

    def setUp(self, plugin_str=None, service_plugins=None, scheduler=None):
        # Defaults may be overridden by subclasses (see the service-plugin
        # variant below).
        if not plugin_str:
            plugin_str = ('neutron.tests.unit.extensions.test_l3.'
                          'TestL3NatIntAgentSchedulingPlugin')

        if not service_plugins:
            service_plugins = {'metering_plugin_name':
                               METERING_SERVICE_PLUGIN_KLASS}

        if not scheduler:
            scheduler = plugin_str

        ext_mgr = MeteringTestExtensionManager()
        super(TestMeteringPluginL3AgentScheduler,
              self).setUp(plugin=plugin_str, ext_mgr=ext_mgr,
                          service_plugins=service_plugins)

        # Freeze UUIDs and the request context, as in TestMeteringPlugin.
        self.uuid = '654f6b9d-0f36-4ae5-bd1b-01616794ca60'

        uuid = 'oslo_utils.uuidutils.generate_uuid'
        self.uuid_patch = mock.patch(uuid, return_value=self.uuid)
        self.mock_uuid = self.uuid_patch.start()

        self.tenant_id = 'a7e61382-47b8-4d40-bae3-f95981b5637b'
        self.ctx = context.Context('', self.tenant_id, is_admin=True)
        self.context_patch = mock.patch('neutron.context.Context',
                                        return_value=self.ctx)
        self.mock_context = self.context_patch.start()

        # Patch agent lookup so tests control which agent hosts a router.
        self.l3routers_patch = mock.patch(scheduler +
                                          '.get_l3_agents_hosting_routers')
        self.l3routers_mock = self.l3routers_patch.start()

        self.topic = 'metering_agent'

        add = ('neutron.api.rpc.agentnotifiers.' +
               'metering_rpc_agent_api.MeteringAgentNotifyAPI' +
               '.add_metering_label')
        self.add_patch = mock.patch(add)
        self.mock_add = self.add_patch.start()

        remove = ('neutron.api.rpc.agentnotifiers.' +
                  'metering_rpc_agent_api.MeteringAgentNotifyAPI' +
                  '.remove_metering_label')
        self.remove_patch = mock.patch(remove)
        self.mock_remove = self.remove_patch.start()

    def test_add_metering_label_rpc_call(self):
        """Routers hosted by different agents are both notified."""
        second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
        expected = [{'status': 'ACTIVE',
                     'name': 'router1',
                     'gw_port_id': None,
                     'admin_state_up': True,
                     'tenant_id': self.tenant_id,
                     '_metering_labels': [
                         {'rules': [],
                          'id': second_uuid}],
                     'id': self.uuid},
                    {'status': 'ACTIVE',
                     'name': 'router2',
                     'gw_port_id': None,
                     'admin_state_up': True,
                     'tenant_id': self.tenant_id,
                     '_metering_labels': [
                         {'rules': [],
                          'id': second_uuid}],
                     'id': second_uuid}]

        # bind each router to a specific agent
        agent1 = agents_db.Agent(host='agent1')
        agent2 = agents_db.Agent(host='agent2')
        agents = {self.uuid: agent1,
                  second_uuid: agent2}

        def side_effect(context, routers, admin_state_up, active):
            return [agents[routers[0]]]

        self.l3routers_mock.side_effect = side_effect

        with self.router(name='router1', tenant_id=self.tenant_id,
                         set_context=True):
            self.mock_uuid.return_value = second_uuid
            with self.router(name='router2', tenant_id=self.tenant_id,
                             set_context=True):
                with self.metering_label(tenant_id=self.tenant_id,
                                         set_context=True):
                    # Notification order is not guaranteed.
                    self.mock_add.assert_called_with(
                        self.ctx, tools.UnorderedList(expected))
class TestMeteringPluginL3AgentSchedulerServicePlugin(
        TestMeteringPluginL3AgentScheduler):
    """Unit tests for the case where separate service plugin
    implements L3 routing.
    """

    def setUp(self):
        # The L3 service plugin doubles as the scheduler here, while a
        # NAT-less core plugin is loaded.
        l3_service_plugin = ('neutron.tests.unit.extensions.test_l3.'
                             'TestL3NatAgentSchedulingServicePlugin')
        core_plugin = ('neutron.tests.unit.extensions.test_l3.'
                       'TestNoL3NatPlugin')
        plugins = {'metering_plugin_name': METERING_SERVICE_PLUGIN_KLASS,
                   'l3_plugin_name': l3_service_plugin}
        super(TestMeteringPluginL3AgentSchedulerServicePlugin, self).setUp(
            plugin_str=core_plugin, service_plugins=plugins,
            scheduler=l3_service_plugin)
class TestMeteringPluginRpcFromL3Agent(
        test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
        test_l3.L3NatTestCaseMixin,
        test_metering_db.MeteringPluginDbTestCaseMixin):
    """Exercises the RPC callbacks the L3 agent uses to sync metering data."""

    resource_prefix_map = dict(
        (k.replace('_', '-'), "/metering")
        for k in ext_metering.RESOURCE_ATTRIBUTE_MAP
    )

    def setUp(self):
        service_plugins = {'metering_plugin_name':
                           METERING_SERVICE_PLUGIN_KLASS}

        plugin = ('neutron.tests.unit.extensions.test_l3.'
                  'TestL3NatIntAgentSchedulingPlugin')

        ext_mgr = MeteringTestExtensionManager()
        super(TestMeteringPluginRpcFromL3Agent,
              self).setUp(plugin=plugin, service_plugins=service_plugins,
                          ext_mgr=ext_mgr)

        self.meter_plugin = manager.NeutronManager.get_service_plugins().get(
            constants.METERING)

        self.tenant_id = 'admin_tenant_id'
        self.tenant_id_1 = 'tenant_id_1'
        self.tenant_id_2 = 'tenant_id_2'

        self.adminContext = context.get_admin_context()
        # An L3 agent must exist so routers can be scheduled.
        helpers.register_l3_agent(host='agent1')

    def test_get_sync_data_metering(self):
        """Sync data is scoped to the agent hosting the router."""
        with self.subnet() as subnet:
            s = subnet['subnet']
            self._set_net_external(s['network_id'])
            with self.router(name='router1', subnet=subnet) as router:
                r = router['router']
                self._add_external_gateway_to_router(r['id'], s['network_id'])
                with self.metering_label(tenant_id=r['tenant_id']):
                    callbacks = metering_rpc.MeteringRpcCallbacks(
                        self.meter_plugin)
                    data = callbacks.get_sync_data_metering(self.adminContext,
                                                            host='agent1')
                    self.assertEqual('router1', data[0]['name'])

                    # A second agent hosting no routers gets nothing.
                    helpers.register_l3_agent(host='agent2')
                    data = callbacks.get_sync_data_metering(self.adminContext,
                                                            host='agent2')
                    self.assertFalse(data)

                self._remove_external_gateway_from_router(
                    r['id'], s['network_id'])

    def test_get_sync_data_metering_shared(self):
        """A shared label meters routers of every tenant."""
        with self.router(name='router1', tenant_id=self.tenant_id_1):
            with self.router(name='router2', tenant_id=self.tenant_id_2):
                with self.metering_label(tenant_id=self.tenant_id,
                                         shared=True):
                    callbacks = metering_rpc.MeteringRpcCallbacks(
                        self.meter_plugin)
                    data = callbacks.get_sync_data_metering(self.adminContext)

                    routers = [router['name'] for router in data]

                    self.assertIn('router1', routers)
                    self.assertIn('router2', routers)

    def test_get_sync_data_metering_not_shared(self):
        """A private label meters no other tenant's routers."""
        with self.router(name='router1', tenant_id=self.tenant_id_1):
            with self.router(name='router2', tenant_id=self.tenant_id_2):
                with self.metering_label(tenant_id=self.tenant_id):
                    callbacks = metering_rpc.MeteringRpcCallbacks(
                        self.meter_plugin)
                    data = callbacks.get_sync_data_metering(self.adminContext)

                    routers = [router['name'] for router in data]

                    self.assertEqual([], routers)
| apache-2.0 |
MartinHjelmare/home-assistant | homeassistant/components/reddit/sensor.py | 7 | 4105 | """Support for Reddit."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (CONF_USERNAME, CONF_PASSWORD, CONF_MAXIMUM)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)

# Configuration keys specific to this platform.
CONF_CLIENT_ID = 'client_id'
CONF_CLIENT_SECRET = 'client_secret'
CONF_SORT_BY = 'sort_by'
CONF_SUBREDDITS = 'subreddits'

# Attribute names used in the per-post dicts exposed by the sensor.
ATTR_ID = 'id'
ATTR_BODY = 'body'
ATTR_COMMENTS_NUMBER = 'comms_num'
ATTR_CREATED = 'created'
ATTR_POSTS = 'posts'
ATTR_SUBREDDIT = 'subreddit'
ATTR_SCORE = 'score'
ATTR_TITLE = 'title'
ATTR_URL = 'url'

DEFAULT_NAME = 'Reddit'
DOMAIN = 'reddit'

# Valid 'sort_by' values; each names a subreddit listing method used by
# RedditSensor.update() via getattr.
LIST_TYPES = ['top', 'controversial', 'hot', 'new']

# Poll Reddit every 5 minutes.
SCAN_INTERVAL = timedelta(seconds=300)

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_CLIENT_ID): cv.string,
    vol.Required(CONF_CLIENT_SECRET): cv.string,
    vol.Required(CONF_USERNAME): cv.string,
    vol.Required(CONF_PASSWORD): cv.string,
    vol.Required(CONF_SUBREDDITS): vol.All(cv.ensure_list, [cv.string]),
    vol.Optional(CONF_SORT_BY, default='hot'):
        vol.All(cv.string, vol.In(LIST_TYPES)),
    vol.Optional(CONF_MAXIMUM, default=10): cv.positive_int
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Reddit sensor platform."""
    import praw

    user_agent = '{}_home_assistant_sensor'.format(config[CONF_USERNAME])
    subreddits = config[CONF_SUBREDDITS]
    limit = config[CONF_MAXIMUM]
    sort_by = config[CONF_SORT_BY]

    try:
        reddit = praw.Reddit(
            client_id=config[CONF_CLIENT_ID],
            client_secret=config[CONF_CLIENT_SECRET],
            username=config[CONF_USERNAME],
            password=config[CONF_PASSWORD],
            user_agent=user_agent)
        _LOGGER.debug('Connected to praw')
    except praw.exceptions.PRAWException as err:
        # Bail out quietly: the platform simply creates no entities.
        _LOGGER.error("Reddit error %s", err)
        return

    entities = []
    for subreddit in subreddits:
        entities.append(RedditSensor(reddit, subreddit, limit, sort_by))
    add_entities(entities, True)
class RedditSensor(Entity):
    """Representation of a Reddit sensor."""

    def __init__(self, reddit, subreddit: str, limit: int, sort_by: str):
        """Initialize the Reddit sensor."""
        self._reddit = reddit
        self._subreddit = subreddit
        self._limit = limit
        self._sort_by = sort_by
        # List of per-post dicts filled by update().
        self._subreddit_data = []

    @property
    def name(self):
        """Return the name of the sensor."""
        return 'reddit_{}'.format(self._subreddit)

    @property
    def state(self):
        """Return the number of submissions fetched on the last update."""
        return len(self._subreddit_data)

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return {
            ATTR_SUBREDDIT: self._subreddit,
            ATTR_POSTS: self._subreddit_data,
            CONF_SORT_BY: self._sort_by
        }

    @property
    def icon(self):
        """Return the icon to use in the frontend."""
        return 'mdi:reddit'

    def update(self):
        """Update data from Reddit API."""
        import praw

        self._subreddit_data = []
        try:
            target = self._reddit.subreddit(self._subreddit)
            if not hasattr(target, self._sort_by):
                return
            listing = getattr(target, self._sort_by)(limit=self._limit)
            for submission in listing:
                self._subreddit_data.append({
                    ATTR_ID: submission.id,
                    ATTR_URL: submission.url,
                    ATTR_TITLE: submission.title,
                    ATTR_SCORE: submission.score,
                    ATTR_COMMENTS_NUMBER: submission.num_comments,
                    ATTR_CREATED: submission.created,
                    ATTR_BODY: submission.selftext
                })
        except praw.exceptions.PRAWException as err:
            _LOGGER.error("Reddit error %s", err)
| apache-2.0 |
xupei0610/ComputerGraphics-HW | final/lib/assimp/port/PyAssimp/scripts/3d_viewer_py3.py | 6 | 41620 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
""" This program loads a model with PyASSIMP, and display it.
Based on:
- pygame code from http://3dengine.org/Spectator_%28PyOpenGL%29
- http://www.lighthouse3d.com/tutorials
- http://www.songho.ca/opengl/gl_transform.html
- http://code.activestate.com/recipes/325391/
- ASSIMP's C++ SimpleOpenGL viewer
Authors: Séverin Lemaignan, 2012-2016
"""
import sys
import logging
from functools import reduce
logger = logging.getLogger("pyassimp")
gllogger = logging.getLogger("OpenGL")
gllogger.setLevel(logging.WARNING)
logging.basicConfig(level=logging.INFO)
import OpenGL
OpenGL.ERROR_CHECKING = False
OpenGL.ERROR_LOGGING = False
# OpenGL.ERROR_ON_COPY = True
# OpenGL.FULL_LOGGING = True
from OpenGL.GL import *
from OpenGL.arrays import vbo
from OpenGL.GL import shaders
import pygame
import pygame.font
import pygame.image
import math, random
from numpy import linalg
import pyassimp
from pyassimp.postprocess import *
from pyassimp.helper import *
import transformations
# 180-degree rotation around the X axis (homogeneous 4x4). Applied to the
# default camera pose — presumably converts between ASSIMP's and OpenGL's
# camera conventions (TODO confirm).
ROTATION_180_X = numpy.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]], dtype=numpy.float32)

# rendering mode
BASE = "BASE"
COLORS = "COLORS"
SILHOUETTE = "SILHOUETTE"
HELPERS = "HELPERS"

# Entities type
ENTITY = "entity"
CAMERA = "camera"
MESH = "mesh"
FLAT_VERTEX_SHADER_120 = """
#version 120
uniform mat4 u_viewProjectionMatrix;
uniform mat4 u_modelMatrix;
uniform vec4 u_materialDiffuse;
attribute vec3 a_vertex;
varying vec4 v_color;
void main(void)
{
v_color = u_materialDiffuse;
gl_Position = u_viewProjectionMatrix * u_modelMatrix * vec4(a_vertex, 1.0);
}
"""
FLAT_VERTEX_SHADER_130 = """
#version 130
uniform mat4 u_viewProjectionMatrix;
uniform mat4 u_modelMatrix;
uniform vec4 u_materialDiffuse;
in vec3 a_vertex;
out vec4 v_color;
void main(void)
{
v_color = u_materialDiffuse;
gl_Position = u_viewProjectionMatrix * u_modelMatrix * vec4(a_vertex, 1.0);
}
"""
BASIC_VERTEX_SHADER_120 = """
#version 120
uniform mat4 u_viewProjectionMatrix;
uniform mat4 u_modelMatrix;
uniform mat3 u_normalMatrix;
uniform vec3 u_lightPos;
uniform vec4 u_materialDiffuse;
attribute vec3 a_vertex;
attribute vec3 a_normal;
varying vec4 v_color;
void main(void)
{
// Now the normal is in world space, as we pass the light in world space.
vec3 normal = u_normalMatrix * a_normal;
float dist = distance(a_vertex, u_lightPos);
// go to https://www.desmos.com/calculator/nmnaud1hrw to play with the parameters
// att is not used for now
float att=1.0/(1.0+0.8*dist*dist);
vec3 surf2light = normalize(u_lightPos - a_vertex);
vec3 norm = normalize(normal);
float dcont=max(0.0,dot(norm,surf2light));
float ambient = 0.3;
float intensity = dcont + 0.3 + ambient;
v_color = u_materialDiffuse * intensity;
gl_Position = u_viewProjectionMatrix * u_modelMatrix * vec4(a_vertex, 1.0);
}
"""
BASIC_VERTEX_SHADER_130 = """
#version 130
uniform mat4 u_viewProjectionMatrix;
uniform mat4 u_modelMatrix;
uniform mat3 u_normalMatrix;
uniform vec3 u_lightPos;
uniform vec4 u_materialDiffuse;
in vec3 a_vertex;
in vec3 a_normal;
out vec4 v_color;
void main(void)
{
// Now the normal is in world space, as we pass the light in world space.
vec3 normal = u_normalMatrix * a_normal;
float dist = distance(a_vertex, u_lightPos);
// go to https://www.desmos.com/calculator/nmnaud1hrw to play with the parameters
// att is not used for now
float att=1.0/(1.0+0.8*dist*dist);
vec3 surf2light = normalize(u_lightPos - a_vertex);
vec3 norm = normalize(normal);
float dcont=max(0.0,dot(norm,surf2light));
float ambient = 0.3;
float intensity = dcont + 0.3 + ambient;
v_color = u_materialDiffuse * intensity;
gl_Position = u_viewProjectionMatrix * u_modelMatrix * vec4(a_vertex, 1.0);
}
"""
BASIC_FRAGMENT_SHADER_120 = """
#version 120
varying vec4 v_color;
void main() {
gl_FragColor = v_color;
}
"""
BASIC_FRAGMENT_SHADER_130 = """
#version 130
in vec4 v_color;
void main() {
gl_FragColor = v_color;
}
"""
GOOCH_VERTEX_SHADER_120 = """
#version 120
// attributes
attribute vec3 a_vertex; // xyz - position
attribute vec3 a_normal; // xyz - normal
// uniforms
uniform mat4 u_modelMatrix;
uniform mat4 u_viewProjectionMatrix;
uniform mat3 u_normalMatrix;
uniform vec3 u_lightPos;
uniform vec3 u_camPos;
// output data from vertex to fragment shader
varying vec3 o_normal;
varying vec3 o_lightVector;
///////////////////////////////////////////////////////////////////
void main(void)
{
// transform position and normal to world space
vec4 positionWorld = u_modelMatrix * vec4(a_vertex, 1.0);
vec3 normalWorld = u_normalMatrix * a_normal;
// calculate and pass vectors required for lighting
o_lightVector = u_lightPos - positionWorld.xyz;
o_normal = normalWorld;
// project world space position to the screen and output it
gl_Position = u_viewProjectionMatrix * positionWorld;
}
"""
GOOCH_VERTEX_SHADER_130 = """
#version 130
// attributes
in vec3 a_vertex; // xyz - position
in vec3 a_normal; // xyz - normal
// uniforms
uniform mat4 u_modelMatrix;
uniform mat4 u_viewProjectionMatrix;
uniform mat3 u_normalMatrix;
uniform vec3 u_lightPos;
uniform vec3 u_camPos;
// output data from vertex to fragment shader
out vec3 o_normal;
out vec3 o_lightVector;
///////////////////////////////////////////////////////////////////
void main(void)
{
// transform position and normal to world space
vec4 positionWorld = u_modelMatrix * vec4(a_vertex, 1.0);
vec3 normalWorld = u_normalMatrix * a_normal;
// calculate and pass vectors required for lighting
o_lightVector = u_lightPos - positionWorld.xyz;
o_normal = normalWorld;
// project world space position to the screen and output it
gl_Position = u_viewProjectionMatrix * positionWorld;
}
"""
GOOCH_FRAGMENT_SHADER_120 = """
#version 120
// data from vertex shader
varying vec3 o_normal;
varying vec3 o_lightVector;
// diffuse color of the object
uniform vec4 u_materialDiffuse;
// cool color of gooch shading
uniform vec3 u_coolColor;
// warm color of gooch shading
uniform vec3 u_warmColor;
// how much to take from object color in final cool color
uniform float u_alpha;
// how much to take from object color in final warm color
uniform float u_beta;
///////////////////////////////////////////////////////////
void main(void)
{
// normlize vectors for lighting
vec3 normalVector = normalize(o_normal);
vec3 lightVector = normalize(o_lightVector);
// intensity of diffuse lighting [-1, 1]
float diffuseLighting = dot(lightVector, normalVector);
// map intensity of lighting from range [-1; 1] to [0, 1]
float interpolationValue = (1.0 + diffuseLighting)/2;
//////////////////////////////////////////////////////////////////
// cool color mixed with color of the object
vec3 coolColorMod = u_coolColor + vec3(u_materialDiffuse) * u_alpha;
// warm color mixed with color of the object
vec3 warmColorMod = u_warmColor + vec3(u_materialDiffuse) * u_beta;
// interpolation of cool and warm colors according
// to lighting intensity. The lower the light intensity,
// the larger part of the cool color is used
vec3 colorOut = mix(coolColorMod, warmColorMod, interpolationValue);
//////////////////////////////////////////////////////////////////
// save color
gl_FragColor.rgb = colorOut;
gl_FragColor.a = 1;
}
"""
GOOCH_FRAGMENT_SHADER_130 = """
#version 130
// data from vertex shader
in vec3 o_normal;
in vec3 o_lightVector;
// diffuse color of the object
uniform vec4 u_materialDiffuse;
// cool color of gooch shading
uniform vec3 u_coolColor;
// warm color of gooch shading
uniform vec3 u_warmColor;
// how much to take from object color in final cool color
uniform float u_alpha;
// how much to take from object color in final warm color
uniform float u_beta;
// output to framebuffer
out vec4 resultingColor;
///////////////////////////////////////////////////////////
void main(void)
{
// normlize vectors for lighting
vec3 normalVector = normalize(o_normal);
vec3 lightVector = normalize(o_lightVector);
// intensity of diffuse lighting [-1, 1]
float diffuseLighting = dot(lightVector, normalVector);
// map intensity of lighting from range [-1; 1] to [0, 1]
float interpolationValue = (1.0 + diffuseLighting)/2;
//////////////////////////////////////////////////////////////////
// cool color mixed with color of the object
vec3 coolColorMod = u_coolColor + vec3(u_materialDiffuse) * u_alpha;
// warm color mixed with color of the object
vec3 warmColorMod = u_warmColor + vec3(u_materialDiffuse) * u_beta;
// interpolation of cool and warm colors according
// to lighting intensity. The lower the light intensity,
// the larger part of the cool color is used
vec3 colorOut = mix(coolColorMod, warmColorMod, interpolationValue);
//////////////////////////////////////////////////////////////////
// save color
resultingColor.rgb = colorOut;
resultingColor.a = 1;
}
"""
SILHOUETTE_VERTEX_SHADER_120 = """
#version 120
attribute vec3 a_vertex; // xyz - position
attribute vec3 a_normal; // xyz - normal
uniform mat4 u_modelMatrix;
uniform mat4 u_viewProjectionMatrix;
uniform mat4 u_modelViewMatrix;
uniform vec4 u_materialDiffuse;
uniform float u_bordersize; // width of the border
varying vec4 v_color;
void main(void){
v_color = u_materialDiffuse;
float distToCamera = -(u_modelViewMatrix * vec4(a_vertex, 1.0)).z;
vec4 tPos = vec4(a_vertex + a_normal * u_bordersize * distToCamera, 1.0);
gl_Position = u_viewProjectionMatrix * u_modelMatrix * tPos;
}
"""
SILHOUETTE_VERTEX_SHADER_130 = """
#version 130
in vec3 a_vertex; // xyz - position
in vec3 a_normal; // xyz - normal
uniform mat4 u_modelMatrix;
uniform mat4 u_viewProjectionMatrix;
uniform mat4 u_modelViewMatrix;
uniform vec4 u_materialDiffuse;
uniform float u_bordersize; // width of the border
out vec4 v_color;
void main(void){
v_color = u_materialDiffuse;
float distToCamera = -(u_modelViewMatrix * vec4(a_vertex, 1.0)).z;
vec4 tPos = vec4(a_vertex + a_normal * u_bordersize * distToCamera, 1.0);
gl_Position = u_viewProjectionMatrix * u_modelMatrix * tPos;
}
"""
# Default near/far clip distances used by the fallback camera.
DEFAULT_CLIP_PLANE_NEAR = 0.001
DEFAULT_CLIP_PLANE_FAR = 1000.0
def get_world_transform(scene, node):
    """Accumulate local transformations from the scene root down to *node*."""
    if node == scene.rootnode:
        return numpy.identity(4, dtype=numpy.float32)

    # Multiply the ancestors' transforms root-first, then apply the node's own.
    chain = _get_parent_chain(scene, node, [])
    chain.reverse()
    parent_transform = reduce(numpy.dot, [p.transformation for p in chain])
    return numpy.dot(parent_transform, node.transformation)
def _get_parent_chain(scene, node, parents):
parent = node.parent
parents.append(parent)
if parent == scene.rootnode:
return parents
return _get_parent_chain(scene, parent, parents)
class DefaultCamera:
    """Fallback camera used when the loaded model defines no camera.

    Mimics the attributes of an ASSIMP camera node (name, type, clip
    planes, aspect, horizontal fov, transformation) so the rendering code
    can treat it like any other camera.
    """

    def __init__(self, w, h, fov):
        # w, h: viewport size in pixels; fov: horizontal field of view in degrees.
        self.name = "default camera"
        self.type = CAMERA  # module-level node-type constant
        self.clipplanenear = DEFAULT_CLIP_PLANE_NEAR
        self.clipplanefar = DEFAULT_CLIP_PLANE_FAR
        # NOTE(review): under Python 2 without `from __future__ import division`
        # this is integer division (1024/768 == 1) — confirm intended aspect.
        self.aspect = w / h
        self.horizontalfov = fov * math.pi / 180
        # Hand-tuned initial pose overlooking the scene.
        self.transformation = numpy.array([[0.68, -0.32, 0.65, 7.48],
                                           [0.73, 0.31, -0.61, -6.51],
                                           [-0.01, 0.89, 0.44, 5.34],
                                           [0., 0., 0., 1.]], dtype=numpy.float32)
        # Rotate 180 deg around X so the camera's Z points forward.
        self.transformation = numpy.dot(self.transformation, ROTATION_180_X)

    def __str__(self):
        return self.name
class PyAssimp3DViewer:
    """Interactive pygame/OpenGL viewer for models loaded with pyassimp."""

    base_name = "PyASSIMP 3D viewer"

    def __init__(self, model, w=1024, h=768):
        """Create the window, compile the shaders and load the model.

        :param model: path of the model file to display
        :param w, h: window size in pixels
        """
        self.w = w
        self.h = h
        pygame.init()
        pygame.display.set_caption(self.base_name)
        pygame.display.set_mode((w, h), pygame.OPENGL | pygame.DOUBLEBUF)
        glClearColor(0.18, 0.18, 0.18, 1.0)
        # Try GLSL 1.30 first; fall back to GLSL 1.20 if compilation fails.
        shader_compilation_succeeded = False
        try:
            self.set_shaders_v130()
            self.prepare_shaders()
            # BUGFIX: this flag was never set to True, so the 1.20 shaders
            # were always recompiled afterwards and silently overwrote the
            # working 1.30 programs.
            shader_compilation_succeeded = True
        except RuntimeError as message:  # `as` is valid on Python 2.6+ and 3
            sys.stderr.write("%s\n" % message)
            sys.stdout.write("Could not compile shaders in version 1.30, trying version 1.20\n")
        if not shader_compilation_succeeded:
            self.set_shaders_v120()
            self.prepare_shaders()
        self.scene = None
        self.meshes = {}  # stores the OpenGL vertex/faces/normals buffers pointers
        self.node2colorid = {}  # stores a color ID for each node. Useful for mouse picking and visibility checking
        self.colorid2node = {}  # reverse dict of node2colorid
        self.currently_selected = None
        self.moving = False
        self.moving_situation = None
        self.default_camera = DefaultCamera(self.w, self.h, fov=70)
        self.cameras = [self.default_camera]
        self.current_cam_index = 0
        self.current_cam = self.default_camera
        self.set_camera_projection()
        self.load_model(model)
        # user interactions
        self.focal_point = [0, 0, 0]
        self.is_rotating = False
        self.is_panning = False
        self.is_zooming = False
def set_shaders_v120(self):
    """Select the GLSL 1.20 variants of every shader source."""
    (self.BASIC_VERTEX_SHADER,
     self.FLAT_VERTEX_SHADER,
     self.SILHOUETTE_VERTEX_SHADER,
     self.GOOCH_VERTEX_SHADER) = (BASIC_VERTEX_SHADER_120,
                                  FLAT_VERTEX_SHADER_120,
                                  SILHOUETTE_VERTEX_SHADER_120,
                                  GOOCH_VERTEX_SHADER_120)
    (self.BASIC_FRAGMENT_SHADER,
     self.GOOCH_FRAGMENT_SHADER) = (BASIC_FRAGMENT_SHADER_120,
                                    GOOCH_FRAGMENT_SHADER_120)
def set_shaders_v130(self):
    """Select the GLSL 1.30 variants of every shader source."""
    (self.BASIC_VERTEX_SHADER,
     self.FLAT_VERTEX_SHADER,
     self.SILHOUETTE_VERTEX_SHADER,
     self.GOOCH_VERTEX_SHADER) = (BASIC_VERTEX_SHADER_130,
                                  FLAT_VERTEX_SHADER_130,
                                  SILHOUETTE_VERTEX_SHADER_130,
                                  GOOCH_VERTEX_SHADER_130)
    (self.BASIC_FRAGMENT_SHADER,
     self.GOOCH_FRAGMENT_SHADER) = (BASIC_FRAGMENT_SHADER_130,
                                    GOOCH_FRAGMENT_SHADER_130)
def prepare_shaders(self):
    """Compile and link the four GLSL programs used by the viewer and
    resolve their uniform/attribute locations (set_shader_accessors).

    Programs created: self.shader (diffuse), self.flatshader (flat color,
    used for picking), self.silhouette_shader, self.gooch_shader.
    Raises RuntimeError on compile/link failure — caught in __init__ to
    fall back from GLSL 1.30 to 1.20.
    """
    ### Base shader: per-fragment diffuse lighting
    vertex = shaders.compileShader(self.BASIC_VERTEX_SHADER, GL_VERTEX_SHADER)
    fragment = shaders.compileShader(self.BASIC_FRAGMENT_SHADER, GL_FRAGMENT_SHADER)
    self.shader = shaders.compileProgram(vertex, fragment)
    self.set_shader_accessors(('u_modelMatrix',
                               'u_viewProjectionMatrix',
                               'u_normalMatrix',
                               'u_lightPos',
                               'u_materialDiffuse'),
                              ('a_vertex',
                               'a_normal'), self.shader)
    ### Flat shader: uniform color (reuses the basic fragment shader)
    flatvertex = shaders.compileShader(self.FLAT_VERTEX_SHADER, GL_VERTEX_SHADER)
    self.flatshader = shaders.compileProgram(flatvertex, fragment)
    self.set_shader_accessors(('u_modelMatrix',
                               'u_viewProjectionMatrix',
                               'u_materialDiffuse',),
                              ('a_vertex',), self.flatshader)
    ### Silhouette shader: extrudes vertices along normals for outlines
    silh_vertex = shaders.compileShader(self.SILHOUETTE_VERTEX_SHADER, GL_VERTEX_SHADER)
    self.silhouette_shader = shaders.compileProgram(silh_vertex, fragment)
    self.set_shader_accessors(('u_modelMatrix',
                               'u_viewProjectionMatrix',
                               'u_modelViewMatrix',
                               'u_materialDiffuse',
                               'u_bordersize'  # width of the silhouette
                               ),
                              ('a_vertex',
                               'a_normal'), self.silhouette_shader)
    ### Gooch shader: warm/cool tone non-photorealistic shading
    gooch_vertex = shaders.compileShader(self.GOOCH_VERTEX_SHADER, GL_VERTEX_SHADER)
    gooch_fragment = shaders.compileShader(self.GOOCH_FRAGMENT_SHADER, GL_FRAGMENT_SHADER)
    self.gooch_shader = shaders.compileProgram(gooch_vertex, gooch_fragment)
    self.set_shader_accessors(('u_modelMatrix',
                               'u_viewProjectionMatrix',
                               'u_normalMatrix',
                               'u_lightPos',
                               'u_materialDiffuse',
                               'u_coolColor',
                               'u_warmColor',
                               'u_alpha',
                               'u_beta'
                               ),
                              ('a_vertex',
                               'a_normal'), self.gooch_shader)
@staticmethod
def set_shader_accessors(uniforms, attributes, shader):
    """Resolve each uniform/attribute location of ``shader`` and attach
    it to the program object as a same-named Python attribute.

    Raises RuntimeError if a name cannot be resolved (typically because
    the GLSL compiler optimized it out).
    """
    for name in uniforms:
        loc = glGetUniformLocation(shader, name)
        if loc is None or loc == -1:
            raise RuntimeError('No uniform: %s (maybe it is not used '
                               'anymore and has been optimized out by'
                               ' the shader compiler)' % name)
        setattr(shader, name, loc)
    for name in attributes:
        loc = glGetAttribLocation(shader, name)
        if loc is None or loc == -1:
            raise RuntimeError('No attribute: %s' % name)
        setattr(shader, name, loc)
@staticmethod
def prepare_gl_buffers(mesh):
    """Upload one mesh's geometry to the GPU.

    Stores the handles in mesh.gl:
      - "vbo": interleaved [vertex xyz | normal xyz] float buffer
        (6 floats = 24-byte stride, see recursive_render)
      - "faces": element (index) buffer of the triangle indices
      - "nbfaces": number of faces
    """
    mesh.gl = {}
    # Fill the buffer for vertex and normals positions
    v = numpy.array(mesh.vertices, 'f')
    n = numpy.array(mesh.normals, 'f')
    mesh.gl["vbo"] = vbo.VBO(numpy.hstack((v, n)))
    # Fill the element buffer with the triangle indices
    mesh.gl["faces"] = glGenBuffers(1)
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mesh.gl["faces"])
    glBufferData(GL_ELEMENT_ARRAY_BUFFER,
                 numpy.array(mesh.faces, dtype=numpy.int32),
                 GL_STATIC_DRAW)
    mesh.gl["nbfaces"] = len(mesh.faces)
    # Unbind buffers to leave a clean GL state
    glBindBuffer(GL_ARRAY_BUFFER, 0)
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0)
@staticmethod
def get_rgb_from_colorid(colorid):
    """Split a 24-bit picking color id into its (r, g, b) byte components
    (little-endian: red is the lowest byte)."""
    return tuple((colorid >> shift) & 0xff for shift in (0, 8, 16))
def get_color_id(self):
    """Return a random, not-yet-used 24-bit color id.

    Used for GPU-based mouse picking: each mesh node is drawn with a
    unique flat color and the pixel under the cursor is mapped back to
    the node through self.colorid2node.
    """
    while True:
        # BUGFIX: random.randint's upper bound is inclusive; the original
        # randint(0, 256 * 256 * 256) could return 0x1000000, which does
        # not fit in 24 bits and truncates to the same RGB as id 0 in
        # get_rgb_from_colorid, breaking picking for that node.
        colorid = random.randint(0, 256 * 256 * 256 - 1)
        # Loop (instead of the original unbounded recursion) until an
        # unused id is found.
        if colorid not in self.colorid2node:
            return colorid
def glize(self, scene, node):
    """Recursively prepare one scene node (and its children) for rendering.

    Tags the node with a type constant (MESH / CAMERA / ENTITY); mesh
    nodes are registered for color-id picking, camera nodes are added to
    self.cameras with normalized orientation and clip planes.
    """
    logger.info("Loading node <%s>" % node)
    node.selected = True if self.currently_selected and self.currently_selected == node else False
    node.transformation = node.transformation.astype(numpy.float32)
    if node.meshes:
        node.type = MESH
        # Assign a unique flat color for GPU mouse picking
        colorid = self.get_color_id()
        self.colorid2node[colorid] = node
        self.node2colorid[node.name] = colorid
    elif node.name in [c.name for c in scene.cameras]:
        # retrieve the ASSIMP camera object
        [cam] = [c for c in scene.cameras if c.name == node.name]
        node.type = CAMERA
        logger.info("Added camera <%s>" % node.name)
        logger.info("Camera position: %.3f, %.3f, %.3f" % tuple(node.transformation[:, 3][:3].tolist()))
        self.cameras.append(node)
        node.clipplanenear = cam.clipplanenear
        node.clipplanefar = cam.clipplanefar
        if numpy.allclose(cam.lookat, [0, 0, -1]) and numpy.allclose(cam.up, [0, 1, 0]):  # Cameras in .blend files
            # Rotate by 180deg around X to have Z pointing forward
            node.transformation = numpy.dot(node.transformation, ROTATION_180_X)
        else:
            # Only the Blender-style orientation is supported
            raise RuntimeError(
                "I do not know how to normalize this camera orientation: lookat=%s, up=%s" % (cam.lookat, cam.up))
        if cam.aspect == 0.0:
            logger.warning("Camera aspect not set. Setting to default 4:3")
            node.aspect = 1.333
        else:
            node.aspect = cam.aspect
        node.horizontalfov = cam.horizontalfov
    else:
        node.type = ENTITY
    for child in node.children:
        self.glize(scene, child)
def load_model(self, path, postprocess=aiProcessPreset_TargetRealtime_MaxQuality):
    """Load a model with pyassimp, upload its meshes to the GPU and walk
    the scene graph (glize). The ASSIMP scene is released afterwards,
    once all GL buffers are filled."""
    logger.info("Loading model:" + path + "...")
    if postprocess:
        self.scene = pyassimp.load(path, processing=postprocess)
    else:
        self.scene = pyassimp.load(path)
    logger.info("Done.")
    scene = self.scene
    # log some statistics
    logger.info(" meshes: %d" % len(scene.meshes))
    logger.info(" total faces: %d" % sum([len(mesh.faces) for mesh in scene.meshes]))
    logger.info(" materials: %d" % len(scene.materials))
    self.bb_min, self.bb_max = get_bounding_box(self.scene)
    logger.info(" bounding box:" + str(self.bb_min) + " - " + str(self.bb_max))
    self.scene_center = [(a + b) / 2. for a, b in zip(self.bb_min, self.bb_max)]
    for index, mesh in enumerate(scene.meshes):
        self.prepare_gl_buffers(mesh)
    self.glize(scene, scene.rootnode)
    # Finally release the model
    pyassimp.release(scene)
    logger.info("Ready for 3D rendering!")
def cycle_cameras(self):
    """Switch to the next camera in self.cameras, wrapping around."""
    next_index = (self.current_cam_index + 1) % len(self.cameras)
    self.current_cam_index = next_index
    self.current_cam = self.cameras[next_index]
    self.set_camera_projection(self.current_cam)
    logger.info("Switched to camera <%s>" % self.current_cam)
def set_overlay_projection(self):
    """Switch to a pixel-aligned orthographic projection for 2D overlay
    (text) drawing."""
    glViewport(0, 0, self.w, self.h)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    # Map GL coordinates 1:1 onto window pixels
    glOrtho(0.0, self.w - 1.0, 0.0, self.h - 1.0, -1.0, 1.0)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
def set_camera_projection(self, camera=None):
    """Load the perspective projection of ``camera`` (default: current
    camera) into GL_PROJECTION and cache it in self.projection_matrix."""
    if not camera:
        camera = self.current_cam
    # Fall back to the defaults when the camera has no clip planes set
    znear = camera.clipplanenear or DEFAULT_CLIP_PLANE_NEAR
    zfar = camera.clipplanefar or DEFAULT_CLIP_PLANE_FAR
    aspect = camera.aspect
    fov = camera.horizontalfov
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    # Compute gl frustrum
    # NOTE(review): `fov` comes from `horizontalfov` but is applied to the
    # vertical half-extent here (h, then w = h * aspect) — confirm intent.
    tangent = math.tan(fov / 2.)
    h = znear * tangent
    w = h * aspect
    # params: left, right, bottom, top, near, far
    glFrustum(-w, w, -h, h, znear, zfar)
    # equivalent to:
    # gluPerspective(fov * 180/math.pi, aspect, znear, zfar)
    self.projection_matrix = glGetFloatv(GL_PROJECTION_MATRIX).transpose()
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
def render_colors(self):
    """Render every mesh with its flat picking color (COLORS mode).

    The resulting framebuffer is read back by get_hovered_node to map
    the pixel under the mouse to a scene node.
    """
    glEnable(GL_DEPTH_TEST)
    glDepthFunc(GL_LEQUAL)
    glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
    glEnable(GL_CULL_FACE)
    glUseProgram(self.flatshader)
    glUniformMatrix4fv(self.flatshader.u_viewProjectionMatrix, 1, GL_TRUE,
                       numpy.dot(self.projection_matrix, self.view_matrix))
    self.recursive_render(self.scene.rootnode, self.flatshader, mode=COLORS)
    glUseProgram(0)
def get_hovered_node(self, mousex, mousey):
    """
    Return the scene node under the given window coordinates, or None.

    Renders the scene with per-node flat colors (render_colors), reads
    the framebuffer back and maps the pixel color to a node id.
    Attention: The performances of this method relies heavily on the size of the display!
    """
    # mouse out of the window?
    if mousex < 0 or mousex >= self.w or mousey < 0 or mousey >= self.h:
        return None
    self.render_colors()
    # Capture image from the OpenGL buffer
    buf = (GLubyte * (3 * self.w * self.h))(0)
    glReadPixels(0, 0, self.w, self.h, GL_RGB, GL_UNSIGNED_BYTE, buf)
    # Reinterpret the RGB pixel buffer as a 1-D array of 24bits colors
    a = numpy.ndarray(len(buf), numpy.dtype('>u1'), buf)
    colors = numpy.zeros(len(buf) // 3, numpy.dtype('<u4'))
    # Pack each RGB triplet into the low 3 bytes of a little-endian uint32
    for i in range(3):
        colors.view(dtype='>u1')[i::4] = a.view(dtype='>u1')[i::3]
    colorid = colors[mousex + mousey * self.w]
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    # Unknown color (background) -> implicit None return
    if colorid in self.colorid2node:
        return self.colorid2node[colorid]
def render(self, wireframe=False, twosided=False):
    """Render one full frame: grid + axis/camera helpers, then the
    shaded meshes.

    :param wireframe: draw polygons as lines when True
    :param twosided: disable back-face culling when True
    """
    glEnable(GL_DEPTH_TEST)
    glDepthFunc(GL_LEQUAL)
    glPolygonMode(GL_FRONT_AND_BACK, GL_LINE if wireframe else GL_FILL)
    glDisable(GL_CULL_FACE) if twosided else glEnable(GL_CULL_FACE)
    self.render_grid()
    self.recursive_render(self.scene.rootnode, None, mode=HELPERS)
    ### First, the silhouette
    # NOTE: the silhouette pass is currently disabled (if False).
    if False:
        shader = self.silhouette_shader
        # glDepthMask(GL_FALSE)
        glCullFace(GL_FRONT)  # cull front faces
        glUseProgram(shader)
        glUniform1f(shader.u_bordersize, 0.01)
        glUniformMatrix4fv(shader.u_viewProjectionMatrix, 1, GL_TRUE,
                           numpy.dot(self.projection_matrix, self.view_matrix))
        self.recursive_render(self.scene.rootnode, shader, mode=SILHOUETTE)
        glUseProgram(0)
    ### Then, inner shading
    # glDepthMask(GL_TRUE)
    glCullFace(GL_BACK)
    # Hard-coded toggle between Gooch NPR shading and basic diffuse.
    use_gooch = False
    if use_gooch:
        shader = self.gooch_shader
        glUseProgram(shader)
        glUniform3f(shader.u_lightPos, -.5, -.5, .5)
        ##### GOOCH specific
        glUniform3f(shader.u_coolColor, 159.0 / 255, 148.0 / 255, 255.0 / 255)
        glUniform3f(shader.u_warmColor, 255.0 / 255, 75.0 / 255, 75.0 / 255)
        glUniform1f(shader.u_alpha, .25)
        glUniform1f(shader.u_beta, .25)
        #########
    else:
        shader = self.shader
        glUseProgram(shader)
        glUniform3f(shader.u_lightPos, -.5, -.5, .5)
    glUniformMatrix4fv(shader.u_viewProjectionMatrix, 1, GL_TRUE,
                       numpy.dot(self.projection_matrix, self.view_matrix))
    self.recursive_render(self.scene.rootnode, shader)
    glUseProgram(0)
def render_axis(self,
                transformation=numpy.identity(4, dtype=numpy.float32),
                label=None,
                size=0.2,
                selected=False):
    """Draw an RGB (X/Y/Z) axis tripod at the given transform, with an
    optional text label. Selected nodes get thicker, twice-as-long axes.

    NOTE(review): the default argument is a shared numpy array; it is
    only read here, never mutated — keep it that way.
    """
    m = transformation.transpose()  # OpenGL row major
    glPushMatrix()
    glMultMatrixf(m)
    glLineWidth(3 if selected else 1)
    size = 2 * size if selected else size
    glBegin(GL_LINES)
    # draw line for x axis (red)
    glColor3f(1.0, 0.0, 0.0)
    glVertex3f(0.0, 0.0, 0.0)
    glVertex3f(size, 0.0, 0.0)
    # draw line for y axis (green)
    glColor3f(0.0, 1.0, 0.0)
    glVertex3f(0.0, 0.0, 0.0)
    glVertex3f(0.0, size, 0.0)
    # draw line for Z axis (blue)
    glColor3f(0.0, 0.0, 1.0)
    glVertex3f(0.0, 0.0, 0.0)
    glVertex3f(0.0, 0.0, size)
    glEnd()
    if label:
        self.showtext(label)
    glPopMatrix()
@staticmethod
def render_camera(camera, transformation):
    """Draw a wireframe camera gizmo (box-shaped body plus a view cone
    opening along +Z) at the given world transform."""
    m = transformation.transpose()  # OpenGL row major
    aspect = camera.aspect
    u = 0.1  # unit size (in m)
    l = 3 * u  # length of the camera cone
    f = 3 * u  # aperture of the camera cone
    glPushMatrix()
    glMultMatrixf(m)
    glLineWidth(2)
    # Body: rear square, front square, and the far view-cone rectangle
    glBegin(GL_LINE_STRIP)
    glColor3f(.2, .2, .2)
    glVertex3f(u, u, -u)
    glVertex3f(u, -u, -u)
    glVertex3f(-u, -u, -u)
    glVertex3f(-u, u, -u)
    glVertex3f(u, u, -u)
    glVertex3f(u, u, 0.0)
    glVertex3f(u, -u, 0.0)
    glVertex3f(-u, -u, 0.0)
    glVertex3f(-u, u, 0.0)
    glVertex3f(u, u, 0.0)
    glVertex3f(f * aspect, f, l)
    glVertex3f(f * aspect, -f, l)
    glVertex3f(-f * aspect, -f, l)
    glVertex3f(-f * aspect, f, l)
    glVertex3f(f * aspect, f, l)
    glEnd()
    # Remaining cone edges connecting body corners to the far rectangle
    glBegin(GL_LINE_STRIP)
    glVertex3f(u, -u, -u)
    glVertex3f(u, -u, 0.0)
    glVertex3f(f * aspect, -f, l)
    glEnd()
    glBegin(GL_LINE_STRIP)
    glVertex3f(-u, -u, -u)
    glVertex3f(-u, -u, 0.0)
    glVertex3f(-f * aspect, -f, l)
    glEnd()
    glBegin(GL_LINE_STRIP)
    glVertex3f(-u, u, -u)
    glVertex3f(-u, u, 0.0)
    glVertex3f(-f * aspect, f, l)
    glEnd()
    glPopMatrix()
@staticmethod
def render_grid():
    """Draw a 20x20 unit reference grid on the Z=0 (ground) plane."""
    glLineWidth(1)
    glColor3f(0.5, 0.5, 0.5)
    glBegin(GL_LINES)
    # lines of constant X
    for i in range(-10, 11):
        glVertex3f(i, -10.0, 0.0)
        glVertex3f(i, 10.0, 0.0)
    # lines of constant Y
    for i in range(-10, 11):
        glVertex3f(-10.0, i, 0.0)
        glVertex3f(10.0, i, 0.0)
    glEnd()
def recursive_render(self, node, shader, mode=BASE, with_normals=True):
    """ Main recursive rendering method.

    Renders `node` and all its children with the given shader program.
    `mode` selects the pass: BASE (lit diffuse), COLORS (flat picking
    colors), SILHOUETTE (outline pass), HELPERS (axes/camera gizmos,
    shader unused).
    """
    normals = with_normals
    if mode == COLORS:
        # The picking pass only needs flat colors, no normals
        normals = False
    if not hasattr(node, "selected"):
        node.selected = False
    m = get_world_transform(self.scene, node)
    # HELPERS mode
    ###
    if mode == HELPERS:
        # if node.type == ENTITY:
        self.render_axis(m,
                         label=node.name if node != self.scene.rootnode else None,
                         selected=node.selected if hasattr(node, "selected") else False)
        if node.type == CAMERA:
            self.render_camera(node, m)
        for child in node.children:
            self.recursive_render(child, shader, mode)
        return
    # Mesh rendering modes
    ###
    if node.type == MESH:
        for mesh in node.meshes:
            stride = 24  # 6 * 4 bytes (interleaved xyz position + xyz normal)
            # Pick the diffuse color to upload depending on pass/selection
            if node.selected and mode == SILHOUETTE:
                glUniform4f(shader.u_materialDiffuse, 1.0, 0.0, 0.0, 1.0)
                glUniformMatrix4fv(shader.u_modelViewMatrix, 1, GL_TRUE,
                                   numpy.dot(self.view_matrix, m))
            else:
                if mode == COLORS:
                    colorid = self.node2colorid[node.name]
                    r, g, b = self.get_rgb_from_colorid(colorid)
                    glUniform4f(shader.u_materialDiffuse, r / 255.0, g / 255.0, b / 255.0, 1.0)
                elif mode == SILHOUETTE:
                    glUniform4f(shader.u_materialDiffuse, .0, .0, .0, 1.0)
                else:
                    if node.selected:
                        diffuse = (1.0, 0.0, 0.0, 1.0)  # selected nodes in red
                    else:
                        diffuse = mesh.material.properties["diffuse"]
                    if len(diffuse) == 3:  # RGB instead of expected RGBA
                        diffuse.append(1.0)
                    glUniform4f(shader.u_materialDiffuse, *diffuse)
                    # if ambient:
                    # glUniform4f( shader.Material_ambient, *mat["ambient"] )
            if mode == BASE:  # not in COLORS or SILHOUETTE
                # Normal matrix = inverse-transpose of the upper-left 3x3 modelview
                normal_matrix = linalg.inv(numpy.dot(self.view_matrix, m)[0:3, 0:3]).transpose()
                glUniformMatrix3fv(shader.u_normalMatrix, 1, GL_TRUE, normal_matrix)
            glUniformMatrix4fv(shader.u_modelMatrix, 1, GL_TRUE, m)
            # Bind the interleaved VBO and point the shader attributes at it
            vbo = mesh.gl["vbo"]
            vbo.bind()
            glEnableVertexAttribArray(shader.a_vertex)
            if normals:
                glEnableVertexAttribArray(shader.a_normal)
            glVertexAttribPointer(
                shader.a_vertex,
                3, GL_FLOAT, False, stride, vbo
            )
            if normals:
                glVertexAttribPointer(
                    shader.a_normal,
                    3, GL_FLOAT, False, stride, vbo + 12
                )
            glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mesh.gl["faces"])
            glDrawElements(GL_TRIANGLES, mesh.gl["nbfaces"] * 3, GL_UNSIGNED_INT, None)
            # Restore a clean GL state
            vbo.unbind()
            glDisableVertexAttribArray(shader.a_vertex)
            if normals:
                glDisableVertexAttribArray(shader.a_normal)
            glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0)
    for child in node.children:
        self.recursive_render(child, shader, mode)
def switch_to_overlay(self):
    """Save the current matrix and switch to the 2D overlay projection."""
    glPushMatrix()
    self.set_overlay_projection()
def switch_from_overlay(self):
    """Restore the 3D camera projection after overlay drawing."""
    self.set_camera_projection()
    glPopMatrix()
def select_node(self, node):
    """Make ``node`` the current selection (None to clear) and refresh
    the `selected` flag of every node in the scene."""
    self.currently_selected = node
    self.update_node_select(self.scene.rootnode)
def update_node_select(self, node):
    """Recursively sync each node's ``selected`` flag with the current
    selection."""
    node.selected = node is self.currently_selected
    for child in node.children:
        self.update_node_select(child)
def loop(self):
    """Flip the display, process events and clear the buffers for the
    next frame. Returns False when the application should exit."""
    pygame.display.flip()
    if not self.process_events():
        return False  # ESC has been pressed
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    return True
def process_events(self):
    """Poll pygame events, update the interaction state (select, orbit,
    pan, zoom) and apply the mouse motion. Returns False on quit."""
    # pygame mouse button numbers
    LEFT_BUTTON = 1
    MIDDLE_BUTTON = 2
    RIGHT_BUTTON = 3
    WHEEL_UP = 4
    WHEEL_DOWN = 5
    dx, dy = pygame.mouse.get_rel()
    mousex, mousey = pygame.mouse.get_pos()
    zooming_one_shot = False
    ok = True
    for evt in pygame.event.get():
        if evt.type == pygame.MOUSEBUTTONDOWN and evt.button == LEFT_BUTTON:
            # Left click: toggle selection of the node under the cursor,
            # or start orbiting when clicking on empty space.
            # (mouse y is flipped: pygame origin is top-left, GL bottom-left)
            hovered = self.get_hovered_node(mousex, self.h - mousey)
            if hovered:
                if self.currently_selected and self.currently_selected == hovered:
                    self.select_node(None)
                else:
                    logger.info("Node %s selected" % hovered)
                    self.select_node(hovered)
            else:
                self.is_rotating = True
        if evt.type == pygame.MOUSEBUTTONUP and evt.button == LEFT_BUTTON:
            self.is_rotating = False
        if evt.type == pygame.MOUSEBUTTONDOWN and evt.button == MIDDLE_BUTTON:
            self.is_panning = True
        if evt.type == pygame.MOUSEBUTTONUP and evt.button == MIDDLE_BUTTON:
            self.is_panning = False
        if evt.type == pygame.MOUSEBUTTONDOWN and evt.button == RIGHT_BUTTON:
            self.is_zooming = True
        if evt.type == pygame.MOUSEBUTTONUP and evt.button == RIGHT_BUTTON:
            self.is_zooming = False
        if evt.type == pygame.MOUSEBUTTONDOWN and evt.button in [WHEEL_UP, WHEEL_DOWN]:
            # Mouse wheel: one-shot zoom step of fixed amplitude
            zooming_one_shot = True
            self.is_zooming = True
            dy = -10 if evt.button == WHEEL_UP else 10
        if evt.type == pygame.KEYDOWN:
            ok = (ok and self.process_keystroke(evt.key, evt.mod))
    self.controls_3d(dx, dy, zooming_one_shot)
    return ok
def process_keystroke(self, key, mod):
    """Handle one key press; return False when the app should quit
    (ESC or q). `mod` (modifier mask) is currently unused."""
    # Arrow keys move the currently selected node in the ground plane.
    if self.currently_selected:
        arrow_moves = {pygame.K_UP: (1, 0),
                       pygame.K_DOWN: (-1, 0),
                       pygame.K_LEFT: (0, -1),
                       pygame.K_RIGHT: (0, 1)}
        up, strafe = arrow_moves.get(key, (0, 0))
        self.move_selected_node(up, strafe)
    if key == pygame.K_f:
        pygame.display.toggle_fullscreen()
    if key == pygame.K_TAB:
        self.cycle_cameras()
    if key in [pygame.K_ESCAPE, pygame.K_q]:
        return False
    return True
def controls_3d(self, dx, dy, zooming_one_shot=False):
    """Apply mouse motion (dx, dy in pixels) to the current camera
    according to the active interaction mode (orbit / pan / zoom)."""
    CAMERA_TRANSLATION_FACTOR = 0.01
    CAMERA_ROTATION_FACTOR = 0.01
    if not (self.is_rotating or self.is_panning or self.is_zooming):
        return
    current_pos = self.current_cam.transformation[:3, 3].copy()
    # Distance to the focal point scales the pan/zoom speed
    distance = numpy.linalg.norm(self.focal_point - current_pos)
    if self.is_rotating:
        """ Orbiting the camera is implemented the following way:
        - the rotation is split into a rotation around the *world* Z axis
        (controlled by the horizontal mouse motion along X) and a
        rotation around the *X* axis of the camera (pitch) *shifted to
        the focal origin* (the world origin for now). This is controlled
        by the vertical motion of the mouse (Y axis).
        - as a result, the resulting transformation of the camera in the
        world frame C' is:
        C' = (T · Rx · T⁻¹ · (Rz · C)⁻¹)⁻¹
        where:
        - C is the original camera transformation in the world frame,
        - Rz is the rotation along the Z axis (in the world frame)
        - T is the translation camera -> world (ie, the inverse of the
        translation part of C
        - Rx is the rotation around X in the (translated) camera frame
        """
        rotation_camera_x = dy * CAMERA_ROTATION_FACTOR
        rotation_world_z = dx * CAMERA_ROTATION_FACTOR
        world_z_rotation = transformations.euler_matrix(0, 0, rotation_world_z)
        cam_x_rotation = transformations.euler_matrix(rotation_camera_x, 0, 0)
        after_world_z_rotation = numpy.dot(world_z_rotation, self.current_cam.transformation)
        inverse_transformation = transformations.inverse_matrix(after_world_z_rotation)
        translation = transformations.translation_matrix(
            transformations.decompose_matrix(inverse_transformation)[3])
        inverse_translation = transformations.inverse_matrix(translation)
        new_inverse = numpy.dot(inverse_translation, inverse_transformation)
        new_inverse = numpy.dot(cam_x_rotation, new_inverse)
        new_inverse = numpy.dot(translation, new_inverse)
        self.current_cam.transformation = transformations.inverse_matrix(new_inverse).astype(numpy.float32)
    if self.is_panning:
        # Translate in the camera's local X/Y plane
        tx = -dx * CAMERA_TRANSLATION_FACTOR * distance
        ty = dy * CAMERA_TRANSLATION_FACTOR * distance
        cam_transform = transformations.translation_matrix((tx, ty, 0)).astype(numpy.float32)
        self.current_cam.transformation = numpy.dot(self.current_cam.transformation, cam_transform)
    if self.is_zooming:
        # Translate along the camera's local Z axis
        tz = dy * CAMERA_TRANSLATION_FACTOR * distance
        cam_transform = transformations.translation_matrix((0, 0, tz)).astype(numpy.float32)
        self.current_cam.transformation = numpy.dot(self.current_cam.transformation, cam_transform)
    if zooming_one_shot:
        self.is_zooming = False
    self.update_view_camera()
def update_view_camera(self):
    """Recompute self.view_matrix from the current camera and load it
    into the fixed-function modelview matrix (used by helper drawing)."""
    self.view_matrix = linalg.inv(self.current_cam.transformation)
    # Rotate by 180deg around X to have Z pointing backward (OpenGL convention)
    self.view_matrix = numpy.dot(ROTATION_180_X, self.view_matrix)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    glMultMatrixf(self.view_matrix.transpose())
def move_selected_node(self, up, strafe):
    """Translate the currently selected node by `strafe` along world X
    and `up` along world Z (callers guard against an empty selection)."""
    node = self.currently_selected
    node.transformation[0, 3] += strafe
    node.transformation[2, 3] += up
@staticmethod
def showtext(text, x=0, y=0, z=0, size=20):
    """Draw ``text`` at raster position (x, y, z) using pygame font
    rendering, blitted with glDrawPixels."""
    # TODO: alpha blending does not work...
    # glEnable(GL_BLEND)
    # glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
    font = pygame.font.Font(None, size)
    # Background color matches the glClearColor grey (0.18)
    text_surface = font.render(text, True, (10, 10, 10, 255),
                               (255 * 0.18, 255 * 0.18, 255 * 0.18, 0))
    text_data = pygame.image.tostring(text_surface, "RGBA", True)
    glRasterPos3d(x, y, z)
    glDrawPixels(text_surface.get_width(),
                 text_surface.get_height(),
                 GL_RGBA, GL_UNSIGNED_BYTE,
                 text_data)
    # glDisable(GL_BLEND)
def main(model, width, height):
    """Create the viewer for ``model`` and run the render loop until the
    user quits (capped at 30 fps)."""
    app = PyAssimp3DViewer(model, w=width, h=height)
    clock = pygame.time.Clock()
    while app.loop():
        app.update_view_camera()
        ## Main rendering
        app.render()
        ## GUI text display
        app.switch_to_overlay()
        app.showtext("Active camera: %s" % str(app.current_cam), 10, app.h - 30)
        if app.currently_selected:
            app.showtext("Selected node: %s" % app.currently_selected, 10, app.h - 50)
            pos = app.h - 70
            # Show the selected node's world translation
            app.showtext("(%sm, %sm, %sm)" % (app.currently_selected.transformation[0, 3],
                                              app.currently_selected.transformation[1, 3],
                                              app.currently_selected.transformation[2, 3]), 30, pos)
        app.switch_from_overlay()
        # Make sure we do not go over 30fps
        clock.tick(30)
    logger.info("Quitting! Bye bye!")
#########################################################################
#########################################################################

if __name__ == '__main__':
    # One command-line argument is required: the model file to display.
    if len(sys.argv) < 2:
        print("Usage: " + __file__ + " <model>")
        sys.exit(2)
    main(model=sys.argv[1], width=1024, height=768)
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.