| repo_name (string, 5-100 chars) | path (string, 4-294 chars) | copies (string, 990 classes) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (string, 15 classes) |
|---|---|---|---|---|---|
Talustus/dreamkernel_ics_sghi777 | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys

if len(sys.argv) != 2:
    print "Usage: %s FILE" % sys.argv[0]
    sys.exit(2)

readelf = os.getenv("READELF", "readelf")

start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")

def check_func(func, slots, rlen_sum):
    if slots != rlen_sum:
        global num_errors
        num_errors += 1
        if not func:
            func = "[%#x-%#x]" % (start, end)
        print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
    return

num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        check_func(func, slots, rlen_sum)
        func = m.group(1)
        start = long(m.group(2), 16)
        end = long(m.group(3), 16)
        slots = 3 * (end - start) / 16
        rlen_sum = 0L
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += long(m.group(1))
# Flush the unwind info of the last function in the file.
check_func(func, slots, rlen_sum)

if num_errors == 0:
    print "No errors detected in %u functions." % num_funcs
else:
    if num_errors > 1:
        err = "errors"
    else:
        err = "error"
    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
    sys.exit(1)
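
# Minimal usage sketch (file name is hypothetical): run against an ia64
# kernel image whose unwind sections readelf can parse, optionally pointing
# READELF at a cross-toolchain binary:
#   READELF=ia64-linux-readelf ./unwcheck.py vmlinux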
| gpl-2.0 |
nexlab/domotikad | domotika/boards/modules/DMRv3.py | 1 | 1727 | ###########################################################################
# Copyright (c) 2011-2014 Unixmedia S.r.l. <info@unixmedia.it>
# Copyright (c) 2011-2014 Franco (nextime) Lanza <franco@unixmedia.it>
#
# Domotika System Controller Daemon "domotikad" [http://trac.unixmedia.it]
#
# This file is part of domotikad.
#
# domotikad is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from domotika.boards import iboards
from domotika.boards.boardtype import BaseBoard, ANABoard, INPBoard, OUTBoard
from zope.interface import implements
from twisted.plugin import IPlugin
try:
    import hashlib
    md5 = hashlib
    md5.new = hashlib.md5
    sha1 = hashlib.sha1
except ImportError:
    # Fallback for Python < 2.5: the standalone stdlib hash modules are
    # named md5 and sha (there is no module called sha1).
    import md5
    import sha as sha1


class DMBoard(INPBoard, ANABoard, OUTBoard, BaseBoard):

    hasAnalogs = True
    hasOutputs = True
    hasInputs = True
    hasRelays = True
    hasAmperometers = True
    fwtype = 'relaymaster'


class Board(object):
    implements(IPlugin, iboards.IDMBoards)

    def getBoard(self, host, port, pwd, lang):
        return DMBoard(self.core, host, port, pwd, lang)


board = Board()
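
# Sketch of an assumed call site (not part of this module): the Twisted
# plugin machinery is expected to attach a `core` attribute to the Board
# instance before getBoard is called, e.g.:
#   board.core = core
#   dm = board.getBoard('192.168.0.10', 7075, 'secret', 'en')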
| gpl-3.0 |
moijes12/oh-mainline | vendor/packages/django-celery/docs/_ext/literals_to_xrefs.py | 48 | 4725 | """
Runs through a reST file looking for old-style literals, and helps replace them
with new-style references.
"""
import re
import sys
import shelve
refre = re.compile(r'``([^`\s]+?)``')
ROLES = (
    'attr',
    'class',
    "djadmin",
    'data',
    'exc',
    'file',
    'func',
    'lookup',
    'meth',
    'mod',
    "djadminopt",
    "ref",
    "setting",
    "term",
    "tfilter",
    "ttag",

    # special
    "skip",
)

ALWAYS_SKIP = [
    "NULL",
    "True",
    "False",
]


def fixliterals(fname):
    data = open(fname).read()

    last = 0
    new = []
    storage = shelve.open("/tmp/literals_to_xref.shelve")
    lastvalues = storage.get("lastvalues", {})

    for m in refre.finditer(data):

        new.append(data[last:m.start()])
        last = m.end()

        line_start = data.rfind("\n", 0, m.start())
        line_end = data.find("\n", m.end())
        prev_start = data.rfind("\n", 0, line_start)
        next_end = data.find("\n", line_end + 1)

        # Skip always-skip stuff
        if m.group(1) in ALWAYS_SKIP:
            new.append(m.group(0))
            continue

        # skip when the next line is a title
        next_line = data[m.end():next_end].strip()
        if next_line[0] in "!-/:-@[-`{-~" and \
                all(c == next_line[0] for c in next_line):
            new.append(m.group(0))
            continue

        sys.stdout.write("\n" + "-" * 80 + "\n")
        sys.stdout.write(data[prev_start + 1:m.start()])
        sys.stdout.write(colorize(m.group(0), fg="red"))
        sys.stdout.write(data[m.end():next_end])
        sys.stdout.write("\n\n")

        replace_type = None
        while replace_type is None:
            replace_type = raw_input(
                colorize("Replace role: ", fg="yellow")).strip().lower()
            if replace_type and replace_type not in ROLES:
                replace_type = None

        if replace_type == "":
            new.append(m.group(0))
            continue

        if replace_type == "skip":
            new.append(m.group(0))
            ALWAYS_SKIP.append(m.group(1))
            continue

        default = lastvalues.get(m.group(1), m.group(1))
        if default.endswith("()") and \
                replace_type in ("class", "func", "meth"):
            default = default[:-2]
        replace_value = raw_input(
            colorize("Text <target> [", fg="yellow") + default +
            colorize("]: ", fg="yellow")).strip()
        if not replace_value:
            replace_value = default
        new.append(":%s:`%s`" % (replace_type, replace_value))
        lastvalues[m.group(1)] = replace_value

    new.append(data[last:])
    open(fname, "w").write("".join(new))

    storage["lastvalues"] = lastvalues
    storage.close()


def colorize(text='', opts=(), **kwargs):
    """
    Returns your text, enclosed in ANSI graphics codes.

    Depends on the keyword arguments 'fg' and 'bg', and the contents of
    the opts tuple/list.

    Returns the RESET code if no parameters are given.

    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'

    Valid options:
        'bold'
        'underscore'
        'blink'
        'reverse'
        'conceal'
        'noreset' - string will not be auto-terminated with the RESET code

    Examples:
        colorize('hello', fg='red', bg='blue', opts=('blink',))
        colorize()
        colorize('goodbye', opts=('underscore',))
        print colorize('first line', fg='red', opts=('noreset',))
        print 'this should be red too'
        print colorize('and so should this')
        print 'this should not be red'
    """
    color_names = ('black', 'red', 'green', 'yellow',
                   'blue', 'magenta', 'cyan', 'white')
    foreground = dict([(color_names[x], '3%s' % x) for x in range(8)])
    background = dict([(color_names[x], '4%s' % x) for x in range(8)])

    RESET = '0'
    opt_dict = {'bold': '1',
                'underscore': '4',
                'blink': '5',
                'reverse': '7',
                'conceal': '8'}

    text = str(text)
    code_list = []
    if text == '' and len(opts) == 1 and opts[0] == 'reset':
        return '\x1b[%sm' % RESET
    for k, v in kwargs.iteritems():
        if k == 'fg':
            code_list.append(foreground[v])
        elif k == 'bg':
            code_list.append(background[v])
    for o in opts:
        if o in opt_dict:
            code_list.append(opt_dict[o])
    if 'noreset' not in opts:
        text = text + '\x1b[%sm' % RESET
    return ('\x1b[%sm' % ';'.join(code_list)) + text


if __name__ == '__main__':
    try:
        fixliterals(sys.argv[1])
    except (KeyboardInterrupt, SystemExit):
        print
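
# Example run (path is hypothetical): interactively convert the old-style
# ``literal`` markers in a single reST file, answering the role prompts:
#   python literals_to_xrefs.py docs/topics/http/urls.txt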
| agpl-3.0 |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/numpy/distutils/command/__init__.py | 264 | 1098 | """distutils.command
Package containing implementation of all the standard Distutils
commands.
"""
from __future__ import division, absolute_import, print_function
def test_na_writable_attributes_deletion():
    a = np.NA(2)
    attr = ['payload', 'dtype']
    for s in attr:
        assert_raises(AttributeError, delattr, a, s)


__revision__ = "$Id: __init__.py,v 1.3 2005/05/16 11:08:49 pearu Exp $"

distutils_all = [  # 'build_py',
    'clean',
    'install_clib',
    'install_scripts',
    'bdist',
    'bdist_dumb',
    'bdist_wininst',
]

__import__('distutils.command', globals(), locals(), distutils_all)

__all__ = ['build',
           'config_compiler',
           'config',
           'build_src',
           'build_py',
           'build_ext',
           'build_clib',
           'build_scripts',
           'install',
           'install_data',
           'install_headers',
           'install_lib',
           'bdist_rpm',
           'sdist',
           ] + distutils_all
| mit |
obiben/pokemongo-api | pogo/POGOProtos/Enums/ItemCategory_pb2.py | 16 | 3764 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Enums/ItemCategory.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
  name='POGOProtos/Enums/ItemCategory.proto',
  package='POGOProtos.Enums',
  syntax='proto3',
  serialized_pb=_b('\n#POGOProtos/Enums/ItemCategory.proto\x12\x10POGOProtos.Enums*\xd6\x02\n\x0cItemCategory\x12\x16\n\x12ITEM_CATEGORY_NONE\x10\x00\x12\x1a\n\x16ITEM_CATEGORY_POKEBALL\x10\x01\x12\x16\n\x12ITEM_CATEGORY_FOOD\x10\x02\x12\x1a\n\x16ITEM_CATEGORY_MEDICINE\x10\x03\x12\x17\n\x13ITEM_CATEGORY_BOOST\x10\x04\x12\x1a\n\x16ITEM_CATEGORY_UTILITES\x10\x05\x12\x18\n\x14ITEM_CATEGORY_CAMERA\x10\x06\x12\x16\n\x12ITEM_CATEGORY_DISK\x10\x07\x12\x1b\n\x17ITEM_CATEGORY_INCUBATOR\x10\x08\x12\x19\n\x15ITEM_CATEGORY_INCENSE\x10\t\x12\x1a\n\x16ITEM_CATEGORY_XP_BOOST\x10\n\x12#\n\x1fITEM_CATEGORY_INVENTORY_UPGRADE\x10\x0b\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

_ITEMCATEGORY = _descriptor.EnumDescriptor(
  name='ItemCategory',
  full_name='POGOProtos.Enums.ItemCategory',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='ITEM_CATEGORY_NONE', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ITEM_CATEGORY_POKEBALL', index=1, number=1,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ITEM_CATEGORY_FOOD', index=2, number=2,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ITEM_CATEGORY_MEDICINE', index=3, number=3,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ITEM_CATEGORY_BOOST', index=4, number=4,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ITEM_CATEGORY_UTILITES', index=5, number=5,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ITEM_CATEGORY_CAMERA', index=6, number=6,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ITEM_CATEGORY_DISK', index=7, number=7,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ITEM_CATEGORY_INCUBATOR', index=8, number=8,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ITEM_CATEGORY_INCENSE', index=9, number=9,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ITEM_CATEGORY_XP_BOOST', index=10, number=10,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ITEM_CATEGORY_INVENTORY_UPGRADE', index=11, number=11,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=58,
  serialized_end=400,
)
_sym_db.RegisterEnumDescriptor(_ITEMCATEGORY)
ItemCategory = enum_type_wrapper.EnumTypeWrapper(_ITEMCATEGORY)
ITEM_CATEGORY_NONE = 0
ITEM_CATEGORY_POKEBALL = 1
ITEM_CATEGORY_FOOD = 2
ITEM_CATEGORY_MEDICINE = 3
ITEM_CATEGORY_BOOST = 4
ITEM_CATEGORY_UTILITES = 5
ITEM_CATEGORY_CAMERA = 6
ITEM_CATEGORY_DISK = 7
ITEM_CATEGORY_INCUBATOR = 8
ITEM_CATEGORY_INCENSE = 9
ITEM_CATEGORY_XP_BOOST = 10
ITEM_CATEGORY_INVENTORY_UPGRADE = 11
DESCRIPTOR.enum_types_by_name['ItemCategory'] = _ITEMCATEGORY
# @@protoc_insertion_point(module_scope)
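
# Usage sketch with the generated wrapper, using the standard protobuf
# EnumTypeWrapper API (Name/Value lookups):
#   ItemCategory.Name(1)                      # -> 'ITEM_CATEGORY_POKEBALL'
#   ItemCategory.Value('ITEM_CATEGORY_FOOD')  # -> 2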
| mit |
Giftingnation/GN-Oscar-Custom | oscar/apps/customer/wishlists/views.py | 5 | 10774 | # -*- coding: utf-8 -*-
from django.contrib import messages
from django.core.exceptions import (
    ObjectDoesNotExist, MultipleObjectsReturned, PermissionDenied)
from django.core.urlresolvers import reverse
from django.db.models import get_model
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.views.generic import (
    ListView, CreateView, UpdateView, DeleteView, View, FormView)
from django.utils.translation import ugettext_lazy as _

from oscar.apps.customer.mixins import PageTitleMixin
from oscar.core.loading import get_classes

WishList = get_model('wishlists', 'WishList')
Line = get_model('wishlists', 'Line')
Product = get_model('catalogue', 'Product')
WishListForm, LineFormset = get_classes('wishlists.forms',
                                        ['WishListForm', 'LineFormset'])


class WishListListView(PageTitleMixin, ListView):
    context_object_name = active_tab = "wishlists"
    template_name = 'customer/wishlists/wishlists_list.html'
    page_title = _('Wish Lists')

    def get_queryset(self):
        return self.request.user.wishlists.all()


class WishListDetailView(PageTitleMixin, FormView):
    """
    This view acts as a DetailView for a wish list and allows updating the
    quantities of products.

    It is implemented as a FormView because it's easier to adapt a FormView to
    display a product than to adapt a DetailView to handle form validation.
    """
    template_name = 'customer/wishlists/wishlists_detail.html'
    active_tab = "wishlists"
    form_class = LineFormset

    def dispatch(self, request, *args, **kwargs):
        self.object = self.get_wishlist_or_404(kwargs['key'], request.user)
        return super(WishListDetailView, self).dispatch(request, *args,
                                                        **kwargs)

    def get_wishlist_or_404(self, key, user):
        wishlist = get_object_or_404(WishList, key=key)
        if wishlist.is_allowed_to_see(user):
            return wishlist
        else:
            raise Http404

    def get_page_title(self):
        return self.object.name

    def get_form_kwargs(self):
        kwargs = super(WishListDetailView, self).get_form_kwargs()
        kwargs['instance'] = self.object
        return kwargs

    def get_context_data(self, **kwargs):
        ctx = super(WishListDetailView, self).get_context_data(**kwargs)
        ctx['wishlist'] = self.object
        other_wishlists = self.request.user.wishlists.exclude(
            pk=self.object.pk)
        ctx['other_wishlists'] = other_wishlists
        return ctx

    def form_valid(self, form):
        for subform in form:
            if subform.cleaned_data['quantity'] <= 0:
                subform.instance.delete()
            else:
                subform.save()
        messages.success(self.request, _('Quantities updated.'))
        return HttpResponseRedirect(reverse('customer:wishlists-detail',
                                            kwargs={'key': self.object.key}))


class WishListCreateView(PageTitleMixin, CreateView):
    """
    Create a new wishlist

    If a product ID is passed as a kwarg, then this product will be added to
    the wishlist.
    """
    model = WishList
    template_name = 'customer/wishlists/wishlists_form.html'
    active_tab = "wishlists"
    page_title = _('Create a new wish list')
    form_class = WishListForm
    product = None

    def dispatch(self, request, *args, **kwargs):
        if 'product_pk' in kwargs:
            try:
                self.product = Product.objects.get(pk=kwargs['product_pk'])
            except ObjectDoesNotExist:
                messages.error(
                    request, _("The requested product no longer exists"))
                return HttpResponseRedirect(reverse('wishlists-create'))
        return super(WishListCreateView, self).dispatch(
            request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        ctx = super(WishListCreateView, self).get_context_data(**kwargs)
        ctx['product'] = self.product
        return ctx

    def get_form_kwargs(self):
        kwargs = super(WishListCreateView, self).get_form_kwargs()
        kwargs['user'] = self.request.user
        return kwargs

    def form_valid(self, form):
        wishlist = form.save()
        if self.product:
            wishlist.add(self.product)
            msg = _("Your wishlist has been created and '%(name)s' "
                    "has been added") % {
                        'name': self.product.get_title()}
        else:
            msg = _("Your wishlist has been created")
        messages.success(self.request, msg)
        return HttpResponseRedirect(wishlist.get_absolute_url())


class WishListCreateWithProductView(View):
    """
    Create a wish list and immediately add a product to it
    """

    def post(self, request, *args, **kwargs):
        product = get_object_or_404(Product, pk=kwargs['product_pk'])
        wishlists = request.user.wishlists.all()
        if len(wishlists) == 0:
            wishlist = request.user.wishlists.create()
        else:
            # This shouldn't really happen but we default to using the first
            # wishlist for a user if one already exists when they make this
            # request.
            wishlist = wishlists[0]
        wishlist.add(product)
        messages.success(
            request, _("%(title)s has been added to your wishlist") % {
                'title': product.get_title()})
        return HttpResponseRedirect(request.META.get(
            'HTTP_REFERER', wishlist.get_absolute_url()))


class WishListUpdateView(PageTitleMixin, UpdateView):
    model = WishList
    template_name = 'customer/wishlists/wishlists_form.html'
    active_tab = "wishlists"
    form_class = WishListForm
    context_object_name = 'wishlist'

    def get_page_title(self):
        return self.object.name

    def get_object(self, queryset=None):
        return get_object_or_404(WishList, owner=self.request.user,
                                 key=self.kwargs['key'])

    def get_form_kwargs(self):
        kwargs = super(WishListUpdateView, self).get_form_kwargs()
        kwargs['user'] = self.request.user
        return kwargs

    def get_success_url(self):
        messages.success(
            self.request,
            _("Your '%s' wishlist has been updated") % self.object.name)
        return reverse('customer:wishlists-list')


class WishListDeleteView(PageTitleMixin, DeleteView):
    model = WishList
    template_name = 'customer/wishlists/wishlists_delete.html'
    active_tab = "wishlists"

    def get_page_title(self):
        return u'Delete %s' % self.object.name

    def get_object(self, queryset=None):
        return get_object_or_404(WishList, owner=self.request.user,
                                 key=self.kwargs['key'])

    def get_success_url(self):
        messages.success(
            self.request,
            _("Your '%s' wish list has been deleted") % self.object.name)
        return reverse('customer:wishlists-list')


class WishListAddProduct(View):
    """
    Adds a product to a wish list.

    - If the user doesn't already have a wishlist then it will be created for
      them.
    - If the product is already in the wish list, its quantity is increased.
    """

    def dispatch(self, request, *args, **kwargs):
        self.product = get_object_or_404(Product, pk=kwargs['product_pk'])
        self.wishlist = self.get_or_create_wishlist(request, *args, **kwargs)
        return super(WishListAddProduct, self).dispatch(request)

    def get_or_create_wishlist(self, request, *args, **kwargs):
        wishlists = request.user.wishlists.all()
        num_wishlists = len(wishlists)
        if num_wishlists == 0:
            return request.user.wishlists.create()
        wishlist = wishlists[0]
        if not wishlist.is_allowed_to_edit(request.user):
            raise PermissionDenied
        return wishlist

    def get(self, request, *args, **kwargs):
        # This is nasty as we shouldn't be performing write operations on a
        # GET request.  It's only included as the UI of the product detail
        # page allows a wishlist to be selected from a dropdown.
        return self.add_product()

    def post(self, request, *args, **kwargs):
        return self.add_product()

    def add_product(self):
        self.wishlist.add(self.product)
        # Interpolate outside of _() so the translatable string stays static.
        msg = _("'%s' was added to your wish list.") % self.product.get_title()
        messages.success(self.request, msg)
        return HttpResponseRedirect(
            self.request.META.get('HTTP_REFERER',
                                  self.product.get_absolute_url()))


class LineMixin(object):
    """
    Handles fetching both a wish list and a product

    Views using this mixin must be passed two keyword arguments:

    * key: The key of a wish list

    * line_pk: The primary key of the wish list line

    or

    * product_pk: The primary key of the product
    """

    def dispatch(self, request, *args, **kwargs):
        self.wishlist = get_object_or_404(
            WishList, owner=request.user, key=kwargs['key'])
        try:
            if 'line_pk' in kwargs:
                self.line = self.wishlist.lines.get(pk=kwargs['line_pk'])
            elif 'product_pk' in kwargs:
                self.line = self.wishlist.lines.get(
                    product_id=kwargs['product_pk'])
        except (ObjectDoesNotExist, MultipleObjectsReturned):
            raise Http404
        self.product = self.line.product
        return super(LineMixin, self).dispatch(request, *args, **kwargs)


class WishListRemoveProduct(LineMixin, View):

    def post(self, request, *args, **kwargs):
        self.line.delete()
        msg = _("'%(title)s' was removed from your '%(name)s' wish list") % {
            'title': self.product.get_title(),
            'name': self.wishlist.name}
        messages.success(self.request, msg)
        default_url = reverse(
            'customer:wishlists-detail', kwargs={'key': self.wishlist.key})
        return HttpResponseRedirect(self.request.META.get(
            'HTTP_REFERER', default_url))


class WishListMoveProductToAnotherWishList(LineMixin, View):

    def get(self, request, *args, **kwargs):
        to_wishlist = get_object_or_404(
            WishList, owner=request.user, key=kwargs['to_key'])
        self.line.wishlist = to_wishlist
        self.line.save()
        msg = _("'%(title)s' moved to '%(name)s' wishlist") % {
            'title': self.product.get_title(),
            'name': to_wishlist.name}
        messages.success(self.request, msg)
        default_url = reverse(
            'customer:wishlists-detail', kwargs={'key': self.wishlist.key})
        return HttpResponseRedirect(self.request.META.get(
            'HTTP_REFERER', default_url))
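
# Wiring sketch (assumed URL names, not this app's actual config): the views
# above are reversed under the 'customer' namespace, so a mount would look
# roughly like:
#   url(r'^wishlists/$', WishListListView.as_view(), name='wishlists-list'),
#   url(r'^wishlists/(?P<key>[a-z0-9]+)/$', WishListDetailView.as_view(),
#       name='wishlists-detail'),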
| bsd-3-clause |
xyziemba/chronology | src/chronology/cli.py | 1 | 4901 | import chronology
from chronology import config
import os
import signal
import argparse
import psutil
from subprocess import PIPE
import sys
class Client(object):

    def __init__(self):
        parser = argparse.ArgumentParser(
            prog="chronology.py",
            description="Control the Chronology daemon\nFor more info," +
                        " go to https://github.com/xyziemba/chronology",)
        parser.add_argument("command",
                            choices=['start', 'stop', 'info',
                                     'add-dir', 'remove-dir'])
        parser.add_argument("-d", "--directory",
                            help="Specify a directory for "
                                 "add-dir or remove-dir")
        parser.add_argument("-w", "--watchdirFile",
                            help="File specifying the directories to watch")
        parser.add_argument("-p", "--pidFile",
                            help="PID lock file")
        args = parser.parse_args()
        self.args = args

        if args.pidFile:
            self.pidConfig = config.PidConfig(args.pidFile)
        else:
            self.pidConfig = config.PidConfig()

        if args.watchdirFile:
            self.watchdirConfig = config.WatchdirConfig(args.watchdirFile)
        else:
            self.watchdirConfig = config.WatchdirConfig()

        if args.command == 'start':
            self._start()
        elif args.command == 'stop':
            self._stop()
        elif args.command == 'info':
            self._info()
        elif args.command == 'add-dir':
            if args.directory is None:
                parser.print_usage()
                print "\tadd-dir command requires " \
                      "specifying a directory with -d"
                sys.exit(0)
            self._addDir()
        elif args.command == 'remove-dir':
            if args.directory is None:
                parser.print_usage()
                print "\tremove-dir command requires specifying a " \
                      "directory with -d"
                sys.exit(0)
            self._removeDir()

    def _start(self):
        if not self.watchdirConfig.doesFileExist():
            print "No file found at %s" % self.watchdirConfig.watchdirFile
            print "Not configured to watch anything."
            print
            print "Run: chronology-cli.py add-dir -d <directory to watch>"
            return

        if self.pidConfig.isDaemonRunning():
            print "Daemon already running"
            return

        # Always pass the config file parameters. This makes E2E testing
        # easier.
        invocation = [sys.executable, self._findDaemonFile()]
        invocation.append('-p')
        invocation.append(self.pidConfig.filename)
        invocation.append('-w')
        invocation.append(self.watchdirConfig.filename)

        p = psutil.Popen(invocation, stdout=PIPE)
        print "Started daemon with PID", p.pid

    def _stop(self):
        pid = self.pidConfig.getChronologyPid()
        if pid is None:
            print "Daemon is not running"
            return
        proc = psutil.Process(pid)
        print "Stopping PID %d with SIGINT" % pid
        proc.send_signal(signal.SIGINT)
        try:
            proc.wait(timeout=5)
        except psutil.TimeoutExpired:
            print "Timed out."
            print "Sending SIGABRT. On Windows, " \
                  "kill %d with task manager" % pid
            proc.send_signal(signal.SIGABRT)
        else:
            print "Exited successfully."

    def _info(self):
        pid = self.pidConfig.getChronologyPid()
        if pid is None:
            print "Status: STOPPED\nDaemon is not running"
        else:
            print "Status: RUNNING\nDaemon is running with PID", pid
        print

        if self.watchdirConfig.doesFileExist() and \
                self.watchdirConfig.getWatchedFiles():
            print "Watch dirs:"
            for f in self.watchdirConfig.getWatchedFiles():
                print "\t", f
        else:
            print "Not configured to watch anything."
            print
            print "Run: chronology-cli.py add-dir -d <directory to watch>"

    def _addDir(self):
        # validate dir
        newDir = os.path.abspath(self.args.directory)
        if not os.path.exists(newDir):
            print "Unable to find %s" % newDir
            return

        self.watchdirConfig.addDirToWatch(newDir)

        pid = self.pidConfig.getChronologyPid()
        if pid is not None:
            # Restart the daemon so it picks up the new directory.
            self._stop()
            self._start()

    def _removeDir(self):
        raise Exception("Not yet implemented. Edit %s by hand."
                        % self.watchdirConfig.filename)

    def _findDaemonFile(self):
        chronologyPackageDir = os.path.dirname(chronology.__file__)
        daemonFilePath = os.path.join(
            chronologyPackageDir, "chronology_daemon.py")
        return daemonFilePath
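
# Typical session (sketch, directory path is hypothetical):
#   chronology.py add-dir -d ~/Documents
#   chronology.py start
#   chronology.py info
#   chronology.py stop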
| mit |
v-zhongz/azure-linux-extensions | Common/requests/packages/urllib3/contrib/pyopenssl.py | 197 | 10094 | '''SSL with SNI_-support for Python 2. Follow these instructions if you would
like to verify SSL certificates in Python 2. Note, the default libraries do
*not* do certificate checking; you need to do additional work to validate
certificates yourself.
This needs the following packages installed:
* pyOpenSSL (tested with 0.13)
* ndg-httpsclient (tested with 0.3.2)
* pyasn1 (tested with 0.1.6)
You can install them with the following command:
pip install pyopenssl ndg-httpsclient pyasn1
To activate certificate checking, call
:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
before you begin making HTTP requests. This can be done in a ``sitecustomize``
module, or at any other time before your application begins using ``urllib3``,
like this::
    try:
        import urllib3.contrib.pyopenssl
        urllib3.contrib.pyopenssl.inject_into_urllib3()
    except ImportError:
        pass

Now you can use :mod:`urllib3` as you normally would, and it will support SNI
when the required modules are installed.

Activating this module also has the positive side effect of disabling SSL/TLS
compression in Python 2 (see `CRIME attack`_).

If you want to configure the default list of supported cipher suites, you can
set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.

Module Variables
----------------

:var DEFAULT_SSL_CIPHER_LIST: The list of supported SSL/TLS cipher suites.

.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)

'''
from __future__ import absolute_import

try:
    from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT
    from ndg.httpsclient.subj_alt_name import SubjectAltName as BaseSubjectAltName
except SyntaxError as e:
    raise ImportError(e)

import OpenSSL.SSL
from pyasn1.codec.der import decoder as der_decoder
from pyasn1.type import univ, constraint
from socket import _fileobject, timeout, error as SocketError
import ssl
import select

from .. import connection
from .. import util

__all__ = ['inject_into_urllib3', 'extract_from_urllib3']

# SNI only *really* works if we can read the subjectAltName of certificates.
HAS_SNI = SUBJ_ALT_NAME_SUPPORT

# Map from urllib3 to PyOpenSSL compatible parameter-values.
_openssl_versions = {
    ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
    ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
}

if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'):
    _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD

if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'):
    _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD

try:
    _openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})
except AttributeError:
    pass

_openssl_verify = {
    ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
    ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
    ssl.CERT_REQUIRED:
        OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}

DEFAULT_SSL_CIPHER_LIST = util.ssl_.DEFAULT_CIPHERS

# OpenSSL will only write 16K at a time
SSL_WRITE_BLOCKSIZE = 16384

orig_util_HAS_SNI = util.HAS_SNI
orig_connection_ssl_wrap_socket = connection.ssl_wrap_socket


def inject_into_urllib3():
    'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'

    connection.ssl_wrap_socket = ssl_wrap_socket
    util.HAS_SNI = HAS_SNI


def extract_from_urllib3():
    'Undo monkey-patching by :func:`inject_into_urllib3`.'

    connection.ssl_wrap_socket = orig_connection_ssl_wrap_socket
    util.HAS_SNI = orig_util_HAS_SNI


# Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
class SubjectAltName(BaseSubjectAltName):
    '''ASN.1 implementation for subjectAltNames support'''

    # There is no limit to how many SAN certificates a certificate may have,
    # however this needs to have some limit so we'll set an arbitrarily high
    # limit.
    sizeSpec = univ.SequenceOf.sizeSpec + \
        constraint.ValueSizeConstraint(1, 1024)


# Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
def get_subj_alt_name(peer_cert):
    # Search through extensions
    dns_name = []
    if not SUBJ_ALT_NAME_SUPPORT:
        return dns_name

    general_names = SubjectAltName()
    for i in range(peer_cert.get_extension_count()):
        ext = peer_cert.get_extension(i)
        ext_name = ext.get_short_name()
        if ext_name != 'subjectAltName':
            continue

        # PyOpenSSL returns extension data in ASN.1 encoded form
        ext_dat = ext.get_data()
        decoded_dat = der_decoder.decode(ext_dat,
                                         asn1Spec=general_names)

        for name in decoded_dat:
            if not isinstance(name, SubjectAltName):
                continue
            for entry in range(len(name)):
                component = name.getComponentByPosition(entry)
                if component.getName() != 'dNSName':
                    continue
                dns_name.append(str(component.getComponent()))

    return dns_name


class WrappedSocket(object):
    '''API-compatibility wrapper for Python OpenSSL's Connection-class.

    Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
    collector of pypy.
    '''

    def __init__(self, connection, socket, suppress_ragged_eofs=True):
        self.connection = connection
        self.socket = socket
        self.suppress_ragged_eofs = suppress_ragged_eofs
        self._makefile_refs = 0

    def fileno(self):
        return self.socket.fileno()

    def makefile(self, mode, bufsize=-1):
        self._makefile_refs += 1
        return _fileobject(self, mode, bufsize, close=True)

    def recv(self, *args, **kwargs):
        try:
            data = self.connection.recv(*args, **kwargs)
        except OpenSSL.SSL.SysCallError as e:
            if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
                return b''
            else:
                raise SocketError(e)
        except OpenSSL.SSL.ZeroReturnError as e:
            if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
                return b''
            else:
                raise
        except OpenSSL.SSL.WantReadError:
            rd, wd, ed = select.select(
                [self.socket], [], [], self.socket.gettimeout())
            if not rd:
                raise timeout('The read operation timed out')
            else:
                return self.recv(*args, **kwargs)
        else:
            return data

    def settimeout(self, timeout):
        return self.socket.settimeout(timeout)

    def _send_until_done(self, data):
        while True:
            try:
                return self.connection.send(data)
            except OpenSSL.SSL.WantWriteError:
                _, wlist, _ = select.select([], [self.socket], [],
                                            self.socket.gettimeout())
                if not wlist:
                    raise timeout()
                continue

    def sendall(self, data):
        total_sent = 0
        while total_sent < len(data):
            sent = self._send_until_done(
                data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
            total_sent += sent

    def shutdown(self):
        # FIXME rethrow compatible exceptions should we ever use this
        self.connection.shutdown()

    def close(self):
        if self._makefile_refs < 1:
            try:
                return self.connection.close()
            except OpenSSL.SSL.Error:
                return
        else:
            self._makefile_refs -= 1

    def getpeercert(self, binary_form=False):
        x509 = self.connection.get_peer_certificate()

        if not x509:
            return x509

        if binary_form:
            return OpenSSL.crypto.dump_certificate(
                OpenSSL.crypto.FILETYPE_ASN1,
                x509)

        return {
            'subject': (
                (('commonName', x509.get_subject().CN),),
            ),
            'subjectAltName': [
                ('DNS', value)
                for value in get_subj_alt_name(x509)
            ]
        }

    def _reuse(self):
        self._makefile_refs += 1

    def _drop(self):
        if self._makefile_refs < 1:
            self.close()
        else:
            self._makefile_refs -= 1


def _verify_callback(cnx, x509, err_no, err_depth, return_code):
    return err_no == 0


def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
                    ca_certs=None, server_hostname=None,
                    ssl_version=None, ca_cert_dir=None):
    ctx = OpenSSL.SSL.Context(_openssl_versions[ssl_version])
    if certfile:
        # Match behaviour of the normal python ssl library
        keyfile = keyfile or certfile
        ctx.use_certificate_file(certfile)
    if keyfile:
        ctx.use_privatekey_file(keyfile)
    if cert_reqs != ssl.CERT_NONE:
        ctx.set_verify(_openssl_verify[cert_reqs], _verify_callback)
    if ca_certs or ca_cert_dir:
        try:
            ctx.load_verify_locations(ca_certs, ca_cert_dir)
        except OpenSSL.SSL.Error as e:
            raise ssl.SSLError('bad ca_certs: %r' % ca_certs, e)
    else:
        ctx.set_default_verify_paths()

    # Disable TLS compression to mitigate CRIME attack (issue #309)
    OP_NO_COMPRESSION = 0x20000
    ctx.set_options(OP_NO_COMPRESSION)

    # Set list of supported ciphersuites.
    ctx.set_cipher_list(DEFAULT_SSL_CIPHER_LIST)

    cnx = OpenSSL.SSL.Connection(ctx, sock)
    cnx.set_tlsext_host_name(server_hostname)
    cnx.set_connect_state()
    while True:
        try:
            cnx.do_handshake()
        except OpenSSL.SSL.WantReadError:
            rd, _, _ = select.select([sock], [], [], sock.gettimeout())
            if not rd:
                raise timeout('select timed out')
            continue
        except OpenSSL.SSL.Error as e:
            raise ssl.SSLError('bad handshake: %r' % e)
        break

    return WrappedSocket(cnx, sock)
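
# Usage sketch: activate the PyOpenSSL backend once at startup, then use
# urllib3 as usual (PoolManager is urllib3's standard entry point):
#   import urllib3
#   import urllib3.contrib.pyopenssl
#   urllib3.contrib.pyopenssl.inject_into_urllib3()
#   http = urllib3.PoolManager()
#   r = http.request('GET', 'https://example.com/')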
| apache-2.0 |
anupcshan/bazel | tools/android/strip_resources.py | 31 | 1825 | # Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Removes the resources from a resource APK for incremental deployment.
The reason this utility exists is that the only way we can build a binary
AndroidManifest.xml is by invoking aapt, which builds a whole resource .apk.
Thus, in order to build the AndroidManifest.xml for an incremental .apk, we
invoke aapt, then extract AndroidManifest.xml from its output.
"""
import sys
import zipfile
from third_party.py import gflags
gflags.DEFINE_string("input_resource_apk", None, "The input resource .apk")
gflags.DEFINE_string("output_resource_apk", None, "The output resource .apk")
FLAGS = gflags.FLAGS
HERMETIC_TIMESTAMP = (2001, 1, 1, 0, 0, 0)
def main():
  with zipfile.ZipFile(FLAGS.input_resource_apk) as input_zip:
    with input_zip.open("AndroidManifest.xml") as android_manifest_entry:
      android_manifest = android_manifest_entry.read()

  with zipfile.ZipFile(FLAGS.output_resource_apk, "w") as output_zip:
    # Timestamp is explicitly set so that the resulting zip file is hermetic
    zipinfo = zipfile.ZipInfo(
        filename="AndroidManifest.xml",
        date_time=HERMETIC_TIMESTAMP)
    output_zip.writestr(zipinfo, android_manifest)


if __name__ == "__main__":
  FLAGS(sys.argv)
  main()
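
# Example invocation (file names are hypothetical), keeping only
# AndroidManifest.xml from the aapt-produced resource apk:
#   strip_resources.py --input_resource_apk=resources.ap_ \
#       --output_resource_apk=manifest_only.ap_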
| apache-2.0 |
jleonhard/angular2-webpack-jquery-bootstrap | node_modules/node-forge/tests/policyserver.py | 171 | 3551 | #!/usr/bin/env python
"""
Flash Socket Policy Server.
- Starts Flash socket policy file server.
- Defaults to port 843.
- NOTE: Most operating systems require administrative privileges to use
ports under 1024.
$ ./policyserver.py [options]
"""
"""
Also consider Adobe's solutions:
http://www.adobe.com/devnet/flashplayer/articles/socket_policy_files.html
"""
from multiprocessing import Process
from optparse import OptionParser
import SocketServer
import logging
# Set address reuse for all TCPServers
SocketServer.TCPServer.allow_reuse_address = True
# Static socket policy file string.
# NOTE: This format is very strict. Edit with care.
socket_policy_file = """\
<?xml version="1.0"?>\
<!DOCTYPE cross-domain-policy\
SYSTEM "http://www.adobe.com/xml/dtds/cross-domain-policy.dtd">\
<cross-domain-policy>\
<allow-access-from domain="*" to-ports="*"/>\
</cross-domain-policy>\0"""
class PolicyHandler(SocketServer.BaseRequestHandler):
    """
    The RequestHandler class for our server.

    Returns a policy file when requested.
    """

    def handle(self):
        """Send policy string if proper request string is received."""
        # get some data
        # TODO: make this more robust (while loop, etc)
        self.data = self.request.recv(1024).rstrip('\0')
        logging.debug("%s wrote:%s" % (self.client_address[0], repr(self.data)))
        # if policy file request, send the file.
        if self.data == "<policy-file-request/>":
            logging.info("Policy server request from %s." % (self.client_address[0]))
            self.request.send(socket_policy_file)
        else:
            logging.info("Policy server received junk from %s: \"%s\"" %
                         (self.client_address[0], repr(self.data)))


class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    def serve_forever(self):
        """Handle one request at a time until shutdown or keyboard interrupt."""
        try:
            SocketServer.BaseServer.serve_forever(self)
        except KeyboardInterrupt:
            return


def main():
    """Run socket policy file servers."""
    usage = "Usage: %prog [options]"
    parser = OptionParser(usage=usage)
    parser.add_option("", "--host", dest="host", metavar="HOST",
                      default="localhost", help="bind to HOST")
    parser.add_option("-p", "--port", dest="port", metavar="PORT",
                      default=843, type="int", help="serve on PORT")
    parser.add_option("-d", "--debug", dest="debug", action="store_true",
                      default=False, help="debugging output")
    parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
                      default=False, help="verbose output")
    (options, args) = parser.parse_args()

    # setup logging
    if options.debug:
        lvl = logging.DEBUG
    elif options.verbose:
        lvl = logging.INFO
    else:
        lvl = logging.WARNING
    logging.basicConfig(level=lvl, format="%(levelname)-8s %(message)s")

    # log basic info
    logging.info("Flash Socket Policy Server. Use ctrl-c to exit.")

    # create policy server
    logging.info("Socket policy serving on %s:%d." % (options.host, options.port))
    policyd = ThreadedTCPServer((options.host, options.port), PolicyHandler)

    # start server
    policy_p = Process(target=policyd.serve_forever)
    policy_p.start()
    while policy_p.is_alive():
        try:
            policy_p.join(1)
        except KeyboardInterrupt:
            logging.info("Stopping test server...")


if __name__ == "__main__":
    main()
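
# Example run (most operating systems require root to bind port 843, as the
# header above notes):
#   sudo ./policyserver.py -p 843 -v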
| mit |
apocalypsebg/odoo | addons/sale_layout/models/sale_layout.py | 180 | 5037 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from itertools import groupby
def grouplines(self, ordered_lines, sortkey):
    """Return lines from a specified invoice or sale order grouped by category"""
    grouped_lines = []
    for key, valuesiter in groupby(ordered_lines, sortkey):
        group = {}
        group['category'] = key
        group['lines'] = list(v for v in valuesiter)

        if 'subtotal' in key and key.subtotal is True:
            group['subtotal'] = sum(line.price_subtotal for line in group['lines'])
        grouped_lines.append(group)
    return grouped_lines


class SaleLayoutCategory(osv.Model):
    _name = 'sale_layout.category'
    _order = 'sequence, id'

    _columns = {
        'name': fields.char('Name', required=True),
        'sequence': fields.integer('Sequence', required=True),
        'subtotal': fields.boolean('Add subtotal'),
        'separator': fields.boolean('Add separator'),
        'pagebreak': fields.boolean('Add pagebreak')
    }

    _defaults = {
        'subtotal': True,
        'separator': True,
        'pagebreak': False,
        'sequence': 10
    }


class AccountInvoice(osv.Model):
    _inherit = 'account.invoice'

    def sale_layout_lines(self, cr, uid, ids, invoice_id=None, context=None):
        """
        Returns invoice lines from a specified invoice ordered by
        sale_layout_category sequence. Used in sale_layout module.

        :Parameters:
            -'invoice_id' (int): specify the concerned invoice.
        """
        ordered_lines = self.browse(cr, uid, invoice_id, context=context).invoice_line
        # We chose to group first by category model and, if not present, by invoice name
        sortkey = lambda x: x.sale_layout_cat_id if x.sale_layout_cat_id else ''

        return grouplines(self, ordered_lines, sortkey)


import openerp


class AccountInvoiceLine(osv.Model):
    _inherit = 'account.invoice.line'
    _order = 'invoice_id, categ_sequence, sequence, id'

    sale_layout_cat_id = openerp.fields.Many2one('sale_layout.category',
                                                 string='Section')
    categ_sequence = openerp.fields.Integer(related='sale_layout_cat_id.sequence',
                                            string='Layout Sequence', store=True)

    _defaults = {
        'categ_sequence': 0
    }


class SaleOrder(osv.Model):
    _inherit = 'sale.order'

    def sale_layout_lines(self, cr, uid, ids, order_id=None, context=None):
        """
        Returns order lines from a specified sale ordered by
        sale_layout_category sequence. Used in sale_layout module.

        :Parameters:
            -'order_id' (int): specify the concerned sale order.
        """
        ordered_lines = self.browse(cr, uid, order_id, context=context).order_line
        sortkey = lambda x: x.sale_layout_cat_id if x.sale_layout_cat_id else ''

        return grouplines(self, ordered_lines, sortkey)


class SaleOrderLine(osv.Model):
    _inherit = 'sale.order.line'

    _columns = {
        'sale_layout_cat_id': fields.many2one('sale_layout.category',
                                              string='Section'),
        'categ_sequence': fields.related('sale_layout_cat_id',
                                         'sequence', type='integer',
                                         string='Layout Sequence', store=True)
        # Store is intentionally set in order to keep the "historic" order.
    }

    _defaults = {
        'categ_sequence': 0
    }

    _order = 'order_id, categ_sequence, sale_layout_cat_id, sequence, id'

    def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):
        """Save the layout when converting to an invoice line."""
        invoice_vals = super(SaleOrderLine, self)._prepare_order_line_invoice_line(
            cr, uid, line, account_id=account_id, context=context)
        if line.sale_layout_cat_id:
            invoice_vals['sale_layout_cat_id'] = line.sale_layout_cat_id.id
        if line.categ_sequence:
            invoice_vals['categ_sequence'] = line.categ_sequence
        return invoice_vals
| agpl-3.0 |
jtb0/myPiProject | bewegungssensor.py | 1 | 3437 | #!/usr/bin/env python
#coding: utf8
###############################################################################
#
# This script is called with a set of parameters.
# In this case the parameters are all pins to which a motion sensor is
# connected.
#
# Example:
# $ ./dht22.py 16 18 22
#
# In this example motion sensors are connected to the pins 16, 18 and 22
# (physical numbering). This corresponds to:
# pin 16 = GPIO. 4
# pin 18 = GPIO. 5
# pin 22 = GPIO. 3
#
###############################################################################

import time
import os
import sys
import RPi.GPIO as GPIO

# Read the EBENE (floor/level) environment variable
EBENE = int(os.environ['EBENE'])

# Select the physical pin numbering scheme
GPIO.setmode(GPIO.BOARD)

# Read all pins with an attached motion sensor from the environment variable
BEWEGUNGSSENSOR = os.environ['BEWEGUNGSSENSOR']
# Convert it into a list
ListOfSensor = BEWEGUNGSSENSOR.split(":")

# Configure all given pins as inputs
#for arg in sys.argv[1:]:
for arg in ListOfSensor:
    pin = int(arg)
    GPIO.setup(pin, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)

# Callback that runs whenever an edge event occurs
def doIfHigh(channel):
    # Allow access to the counter variable i
    global i
    # channel carries the pin that fired the event; the loop variable `pin`
    # would always hold the last configured pin at callback time.
    if GPIO.input(channel) == GPIO.HIGH:
        commandString = "curl --header \"Content-Type: text/plain\" --request POST --data \"ON\" http://192.168.0.90:8080/rest/items/Bewegung_E{0}_PIN{1}".format(EBENE, channel)
        # Input is HIGH: report it on the terminal
        print "Eingang HIGH " + str(i)
        print commandString
        os.system(commandString)
    else:
        commandString = "curl --header \"Content-Type: text/plain\" --request POST --data \"OFF\" http://192.168.0.90:8080/rest/items/Bewegung_E{0}_PIN{1}".format(EBENE, channel)
        # Input is LOW: report it on the terminal
        print "Eingang LOW " + str(i)
        print commandString
        os.system(commandString)
    # Increment the event counter
    i = i + 1

# For every pin in the list ...
#for arg in sys.argv[1:]:
for arg in ListOfSensor:
    pin = int(arg)
    print "Pin: {0}".format(pin)
    # ... configure it as an input ...
    GPIO.setup(pin, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
    #GPIO.setup(4, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
    # ... and register an event callback for both edges
    GPIO.add_event_detect(pin, GPIO.BOTH, callback = doIfHigh, bouncetime = 500)
    #GPIO.add_event_detect(4, GPIO.BOTH, callback = doIfHigh, bouncetime = 500)

# Event counter
i = 0

# Main loop
decimal_seconds = 0
while 1:
    time.sleep(0.1)
    raw_input("Enter zum Beenden!\n")
| gpl-3.0 |
KAMI911/wgslas2eovlas | lactransformer/libs/PefFile.py | 2 | 2071 | # -*- coding: UTF-8 -*-
class PefFile:

    def __init__(self, filename):
        self.__FileName = filename
        self.__PefLines = 8
        self.__FileIndex = 0

    def OpenRO(self):
        try:
            self.__PefFileHandler = open(self.__FileName, 'r')
        except IOError as ErrorText:
            print ('Error: %s' % str(ErrorText))

    def Open(self):
        self.__PefFileHandler = open(self.__FileName, 'a')

    def OpenOW(self):
        self.__PefFileHandler = open(self.__FileName, 'w')
        self.__PefFileHandler.truncate()

    def GotoStart(self):
        self.__FileIndex = 0

    def ReadNextItem(self, FileSeek=None):
        if not FileSeek:
            FileSeek = self.__FileIndex
        PefContent = []
        HasData = False
        while True:
            OneLine = (self.__PefFileHandler.readline().rstrip('\n\r'))
            if OneLine != '':
                HasData = True
                TwoPairs = OneLine.split('=')
                PefContent.append(TwoPairs)
            elif OneLine == '' and HasData == True:
                break
            elif OneLine == '' and HasData == False:
                break
        self.__FileIndex = self.__PefFileHandler.tell()
        return PefContent

    def TestItem(self, PefContent):
        errors = 0
        if PefContent[0][0] == 'TYPE' and PefContent[0][1] == 'PlaneObj':
            print('PEF Type is okay...')
        else:
            errors += 1
        if PefContent[3][1] == '8':
            print('Count is 8')
        else:
            errors += 1
        print (errors)
        return errors

    def WriteNextItem(self, PefContent):
        for line in PefContent:
            OneLine = '%s=%s\r\n' % (line[0], line[1])
            self.__PefFileHandler.writelines(OneLine)
        self.__PefFileHandler.writelines('\r\n')

    def PrintNextItem(self):
        print (self.__FileIndex)
        Lines = self.ReadNextItem()
        print (self.__FileIndex)
        for l in Lines:
            print (l)

    def Close(self):
        self.__PefFileHandler.close()
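
# Usage sketch (file name is hypothetical): iterate over the blank-line
# separated key=value records of a PEF file:
#   pef = PefFile('input.pef')
#   pef.OpenRO()
#   item = pef.ReadNextItem()
#   while item:
#       print item
#       item = pef.ReadNextItem()
#   pef.Close()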
| gpl-2.0 |
UXE/local-edx | cms/urls.py | 2 | 7230 | from django.conf import settings
from django.conf.urls import patterns, include, url
# There is a course creators admin table.
from ratelimitbackend import admin
admin.autodiscover()
urlpatterns = patterns('',  # nopep8
    url(r'^transcripts/upload$', 'contentstore.views.upload_transcripts', name='upload_transcripts'),
    url(r'^transcripts/download$', 'contentstore.views.download_transcripts', name='download_transcripts'),
    url(r'^transcripts/check$', 'contentstore.views.check_transcripts', name='check_transcripts'),
    url(r'^transcripts/choose$', 'contentstore.views.choose_transcripts', name='choose_transcripts'),
    url(r'^transcripts/replace$', 'contentstore.views.replace_transcripts', name='replace_transcripts'),
    url(r'^transcripts/rename$', 'contentstore.views.rename_transcripts', name='rename_transcripts'),
    url(r'^transcripts/save$', 'contentstore.views.save_transcripts', name='save_transcripts'),

    url(r'^preview/xblock/(?P<usage_key_string>.*?)/handler/(?P<handler>[^/]*)(?:/(?P<suffix>.*))?$',
        'contentstore.views.preview_handler', name='preview_handler'),

    url(r'^xblock/(?P<usage_key_string>.*?)/handler/(?P<handler>[^/]*)(?:/(?P<suffix>.*))?$',
        'contentstore.views.component_handler', name='component_handler'),

    url(r'^xblock/resource/(?P<block_type>[^/]*)/(?P<uri>.*)$',
        'contentstore.views.xblock.xblock_resource', name='xblock_resource_url'),

    # temporary landing page for a course
    url(r'^edge/(?P<org>[^/]+)/(?P<course>[^/]+)/course/(?P<coursename>[^/]+)$',
        'contentstore.views.landing', name='landing'),

    url(r'^not_found$', 'contentstore.views.not_found', name='not_found'),
    url(r'^server_error$', 'contentstore.views.server_error', name='server_error'),

    # temporary landing page for edge
    url(r'^edge$', 'contentstore.views.edge', name='edge'),

    # noop to squelch ajax errors
    url(r'^event$', 'contentstore.views.event', name='event'),

    url(r'^xmodule/', include('pipeline_js.urls')),
    url(r'^heartbeat$', include('heartbeat.urls')),

    url(r'^user_api/', include('openedx.core.djangoapps.user_api.urls')),

    url(r'^lang_pref/', include('lang_pref.urls')),
)

# User creation and updating views
urlpatterns += patterns(
    '',
    url(r'^create_account$', 'student.views.create_account', name='create_account'),
    url(r'^activate/(?P<key>[^/]*)$', 'student.views.activate_account', name='activate'),

    # ajax view that actually does the work
    url(r'^login_post$', 'student.views.login_user', name='login_post'),
    url(r'^logout$', 'student.views.logout_user', name='logout'),
    url(r'^embargo$', 'student.views.embargo', name="embargo"),
)

# restful api
urlpatterns += patterns(
    'contentstore.views',

    url(r'^$', 'howitworks', name='homepage'),
    url(r'^howitworks$', 'howitworks'),
    url(r'^signup$', 'signup', name='signup'),
    url(r'^signin$', 'login_page', name='login'),
    url(r'^request_course_creator$', 'request_course_creator'),

    url(r'^course_team/{}/(?P<email>.+)?$'.format(settings.COURSE_KEY_PATTERN), 'course_team_handler'),
    url(r'^course_info/{}$'.format(settings.COURSE_KEY_PATTERN), 'course_info_handler'),
    url(
        r'^course_info_update/{}/(?P<provided_id>\d+)?$'.format(settings.COURSE_KEY_PATTERN),
        'course_info_update_handler'
    ),
    url(r'^course/{}?$'.format(settings.COURSE_KEY_PATTERN), 'course_handler', name='course_handler'),
    url(r'^course_notifications/{}/(?P<action_state_id>\d+)?$'.format(settings.COURSE_KEY_PATTERN),
        'course_notifications_handler'),
    url(r'^course_rerun/{}$'.format(settings.COURSE_KEY_PATTERN), 'course_rerun_handler', name='course_rerun_handler'),
    url(r'^container/{}$'.format(settings.USAGE_KEY_PATTERN), 'container_handler'),
    url(r'^checklists/{}/(?P<checklist_index>\d+)?$'.format(settings.COURSE_KEY_PATTERN), 'checklists_handler'),
    url(r'^orphan/{}$'.format(settings.COURSE_KEY_PATTERN), 'orphan_handler'),
    url(r'^assets/{}/{}?$'.format(settings.COURSE_KEY_PATTERN, settings.ASSET_KEY_PATTERN), 'assets_handler'),
    url(r'^import/{}$'.format(settings.COURSE_KEY_PATTERN), 'import_handler'),
    url(r'^import_status/{}/(?P<filename>.+)$'.format(settings.COURSE_KEY_PATTERN), 'import_status_handler'),
    url(r'^export/{}$'.format(settings.COURSE_KEY_PATTERN), 'export_handler'),
    url(r'^xblock/outline/{}$'.format(settings.USAGE_KEY_PATTERN), 'xblock_outline_handler'),
    url(r'^xblock/{}/(?P<view_name>[^/]+)$'.format(settings.USAGE_KEY_PATTERN), 'xblock_view_handler'),
    url(r'^xblock/{}?$'.format(settings.USAGE_KEY_PATTERN), 'xblock_handler'),
    url(r'^tabs/{}$'.format(settings.COURSE_KEY_PATTERN), 'tabs_handler'),
    url(r'^settings/details/{}$'.format(settings.COURSE_KEY_PATTERN), 'settings_handler'),
    url(r'^settings/grading/{}(/)?(?P<grader_index>\d+)?$'.format(settings.COURSE_KEY_PATTERN), 'grading_handler'),
    url(r'^settings/advanced/{}$'.format(settings.COURSE_KEY_PATTERN), 'advanced_settings_handler'),
    url(r'^textbooks/{}$'.format(settings.COURSE_KEY_PATTERN), 'textbooks_list_handler'),
    url(r'^textbooks/{}/(?P<textbook_id>\d[^/]*)$'.format(settings.COURSE_KEY_PATTERN), 'textbooks_detail_handler'),
    url(r'^group_configurations/{}$'.format(settings.COURSE_KEY_PATTERN), 'group_configurations_list_handler'),
    url(r'^group_configurations/{}/(?P<group_configuration_id>\d+)/?$'.format(settings.COURSE_KEY_PATTERN),
        'group_configurations_detail_handler'),
    url(r'^api/val/v0/', include('edxval.urls')),
)

js_info_dict = {
    'domain': 'djangojs',
    # We need to explicitly include external Django apps that are not in LOCALE_PATHS.
    'packages': ('openassessment',),
}

urlpatterns += patterns(
    '',
    # Serve catalog of localized strings to be rendered by Javascript
    url(r'^i18n.js$', 'django.views.i18n.javascript_catalog', js_info_dict),
)

if settings.FEATURES.get('ENABLE_EXPORT_GIT'):
    urlpatterns += (url(
        r'^export_git/{}$'.format(
            settings.COURSE_KEY_PATTERN,
        ),
        'contentstore.views.export_git',
        name='export_git',
    ),)

if settings.FEATURES.get('ENABLE_SERVICE_STATUS'):
    urlpatterns += patterns(
        '',
        url(r'^status/', include('service_status.urls')),
    )

if settings.FEATURES.get('AUTH_USE_CAS'):
    urlpatterns += (
        url(r'^cas-auth/login/$', 'external_auth.views.cas_login', name="cas-login"),
        url(r'^cas-auth/logout/$', 'django_cas.views.logout', {'next_page': '/'}, name="cas-logout"),
    )

urlpatterns += patterns('', url(r'^admin/', include(admin.site.urls)),)

# enable automatic login
if settings.FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING'):
    urlpatterns += (
        url(r'^auto_auth$', 'student.views.auto_auth'),
    )

if settings.DEBUG:
    try:
        from .urls_dev import urlpatterns as dev_urlpatterns
        urlpatterns += dev_urlpatterns
    except ImportError:
        pass

# Custom error pages
# pylint: disable=invalid-name
handler404 = 'contentstore.views.render_404'
handler500 = 'contentstore.views.render_500'

# display error page templates, for testing purposes
urlpatterns += (
    url(r'404', handler404),
    url(r'500', handler500),
)
| agpl-3.0 |
kyubifire/softlayer-python | SoftLayer/fixtures/SoftLayer_Virtual_PlacementGroup.py | 3 | 1643 | getAvailableRouters = [{
"accountId": 1,
"fullyQualifiedDomainName": "bcr01.dal01.softlayer.com",
"hostname": "bcr01.dal01",
"id": 1,
"topLevelLocation": {
"id": 3,
"longName": "Dallas 1",
"name": "dal01",
}
}]
createObject = {
"accountId": 123,
"backendRouterId": 444,
"createDate": "2019-01-18T16:08:44-06:00",
"id": 5555,
"modifyDate": None,
"name": "test01",
"ruleId": 1
}
getObject = {
"createDate": "2019-01-17T14:36:42-06:00",
"id": 1234,
"name": "test-group",
"backendRouter": {
"hostname": "bcr01a.mex01",
"id": 329266
},
"guests": [{
"accountId": 123456789,
"createDate": "2019-01-17T16:44:46-06:00",
"domain": "test.com",
"fullyQualifiedDomainName": "issues10691547765077.test.com",
"hostname": "issues10691547765077",
"id": 69131875,
"maxCpu": 1,
"maxMemory": 1024,
"placementGroupId": 1234,
"provisionDate": "2019-01-17T16:47:17-06:00",
"activeTransaction": {
"id": 107585077,
"transactionStatus": {
"friendlyName": "TESTING TXN",
"name": "RECLAIM_WAIT"
}
},
"globalIdentifier": "c786ac04-b612-4649-9d19-9662434eeaea",
"primaryBackendIpAddress": "10.131.11.14",
"primaryIpAddress": "169.57.70.180",
"status": {
"keyName": "DISCONNECTED",
"name": "Disconnected"
}
}],
"rule": {
"id": 1,
"keyName": "SPREAD",
"name": "SPREAD"
}
}
deleteObject = True
| mit |
abali96/Shapely | tests/test_emptiness.py | 6 | 1453 | from . import unittest
from shapely.geometry.base import BaseGeometry
import shapely.geometry as sgeom
from shapely.geometry.polygon import LinearRing
class EmptinessTestCase(unittest.TestCase):

    def test_empty_base(self):
        g = BaseGeometry()
        self.assertTrue(g._is_empty)

    def test_emptying_point(self):
        p = sgeom.Point(0, 0)
        self.assertFalse(p._is_empty)
        p.empty()
        self.assertTrue(p._is_empty)

    def test_none_geom(self):
        p = BaseGeometry()
        p._geom = None
        self.assertTrue(p.is_empty)

    def test_empty_point(self):
        self.assertTrue(sgeom.Point().is_empty)

    def test_empty_multipoint(self):
        self.assertTrue(sgeom.MultiPoint().is_empty)

    def test_empty_geometry_collection(self):
        self.assertTrue(sgeom.GeometryCollection().is_empty)

    def test_empty_linestring(self):
        self.assertTrue(sgeom.LineString().is_empty)

    def test_empty_multilinestring(self):
        self.assertTrue(sgeom.MultiLineString([]).is_empty)

    def test_empty_polygon(self):
        self.assertTrue(sgeom.Polygon().is_empty)

    def test_empty_multipolygon(self):
        self.assertTrue(sgeom.MultiPolygon([]).is_empty)

    def test_empty_linear_ring(self):
        self.assertTrue(LinearRing().is_empty)


def test_suite():
    return unittest.TestLoader().loadTestsFromTestCase(EmptinessTestCase)


if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
Nikoala/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/appletrailers.py | 19 | 5130 | from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
int_or_none,
)
class AppleTrailersIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?trailers\.apple\.com/trailers/(?P<company>[^/]+)/(?P<movie>[^/]+)'
_TEST = {
"url": "http://trailers.apple.com/trailers/wb/manofsteel/",
"playlist": [
{
"md5": "d97a8e575432dbcb81b7c3acb741f8a8",
"info_dict": {
"id": "manofsteel-trailer4",
"ext": "mov",
"duration": 111,
"title": "Trailer 4",
"upload_date": "20130523",
"uploader_id": "wb",
},
},
{
"md5": "b8017b7131b721fb4e8d6f49e1df908c",
"info_dict": {
"id": "manofsteel-trailer3",
"ext": "mov",
"duration": 182,
"title": "Trailer 3",
"upload_date": "20130417",
"uploader_id": "wb",
},
},
{
"md5": "d0f1e1150989b9924679b441f3404d48",
"info_dict": {
"id": "manofsteel-trailer",
"ext": "mov",
"duration": 148,
"title": "Trailer",
"upload_date": "20121212",
"uploader_id": "wb",
},
},
{
"md5": "5fe08795b943eb2e757fa95cb6def1cb",
"info_dict": {
"id": "manofsteel-teaser",
"ext": "mov",
"duration": 93,
"title": "Teaser",
"upload_date": "20120721",
"uploader_id": "wb",
},
},
]
}
_JSON_RE = r'iTunes.playURL\((.*?)\);'
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
movie = mobj.group('movie')
uploader_id = mobj.group('company')
playlist_url = compat_urlparse.urljoin(url, 'includes/playlists/itunes.inc')
def fix_html(s):
s = re.sub(r'(?s)<script[^<]*?>.*?</script>', '', s)
s = re.sub(r'<img ([^<]*?)>', r'<img \1/>', s)
# The ' in the onClick attributes is not escaped, so the document
# couldn't otherwise be parsed as XML, e.g.: http://trailers.apple.com/trailers/wb/gravity/
def _clean_json(m):
return 'iTunes.playURL(%s);' % m.group(1).replace('\'', '&#39;')
s = re.sub(self._JSON_RE, _clean_json, s)
s = '<html>%s</html>' % s
return s
doc = self._download_xml(playlist_url, movie, transform_source=fix_html)
playlist = []
for li in doc.findall('./div/ul/li'):
on_click = li.find('.//a').attrib['onClick']
trailer_info_json = self._search_regex(self._JSON_RE,
on_click, 'trailer info')
trailer_info = json.loads(trailer_info_json)
title = trailer_info['title']
video_id = movie + '-' + re.sub(r'[^a-zA-Z0-9]', '', title).lower()
thumbnail = li.find('.//img').attrib['src']
upload_date = trailer_info['posted'].replace('-', '')
runtime = trailer_info['runtime']
m = re.search(r'(?P<minutes>[0-9]+):(?P<seconds>[0-9]{1,2})', runtime)
duration = None
if m:
duration = 60 * int(m.group('minutes')) + int(m.group('seconds'))
first_url = trailer_info['url']
trailer_id = first_url.split('/')[-1].rpartition('_')[0].lower()
settings_json_url = compat_urlparse.urljoin(url, 'includes/settings/%s.json' % trailer_id)
settings = self._download_json(settings_json_url, trailer_id, 'Downloading settings json')
formats = []
for format in settings['metadata']['sizes']:
# The src is a file pointing to the real video file
format_url = re.sub(r'_(\d*p.mov)', r'_h\1', format['src'])
formats.append({
'url': format_url,
'format': format['type'],
'width': int_or_none(format['width']),
'height': int_or_none(format['height']),
})
self._sort_formats(formats)
playlist.append({
'_type': 'video',
'id': video_id,
'title': title,
'formats': formats,
'duration': duration,
'thumbnail': thumbnail,
'upload_date': upload_date,
'uploader_id': uploader_id,
'user_agent': 'QuickTime compatible (youtube-dl)',
})
return {
'_type': 'playlist',
'id': movie,
'entries': playlist,
}
| gpl-3.0 |
bitcity/django | django/contrib/admin/templatetags/log.py | 499 | 2080 | from django import template
from django.contrib.admin.models import LogEntry
register = template.Library()
class AdminLogNode(template.Node):
def __init__(self, limit, varname, user):
self.limit, self.varname, self.user = limit, varname, user
def __repr__(self):
return "<GetAdminLog Node>"
def render(self, context):
if self.user is None:
entries = LogEntry.objects.all()
else:
user_id = self.user
if not user_id.isdigit():
user_id = context[self.user].pk
entries = LogEntry.objects.filter(user__pk=user_id)
context[self.varname] = entries.select_related('content_type', 'user')[:int(self.limit)]
return ''
@register.tag
def get_admin_log(parser, token):
"""
Populates a template variable with the admin log for the given criteria.
Usage::
{% get_admin_log [limit] as [varname] for_user [context_var_containing_user_obj] %}
Examples::
{% get_admin_log 10 as admin_log for_user 23 %}
{% get_admin_log 10 as admin_log for_user user %}
{% get_admin_log 10 as admin_log %}
Note that ``context_var_containing_user_obj`` can be a hard-coded integer
(user ID) or the name of a template context variable containing the user
object whose ID you want.
"""
tokens = token.contents.split()
if len(tokens) < 4:
raise template.TemplateSyntaxError(
"'get_admin_log' statements require two arguments")
if not tokens[1].isdigit():
raise template.TemplateSyntaxError(
"First argument to 'get_admin_log' must be an integer")
if tokens[2] != 'as':
raise template.TemplateSyntaxError(
"Second argument to 'get_admin_log' must be 'as'")
if len(tokens) > 4:
if tokens[4] != 'for_user':
raise template.TemplateSyntaxError(
"Fourth argument to 'get_admin_log' must be 'for_user'")
return AdminLogNode(limit=tokens[1], varname=tokens[3], user=(tokens[5] if len(tokens) > 5 else None))
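# A minimal template-side sketch (not part of Django) mirroring the docstring
# examples; it assumes a configured project with django.contrib.admin
# installed, so the `log` tag library can be loaded:
#
#     {% load log %}
#     {% get_admin_log 10 as admin_log for_user user %}
#     {% for entry in admin_log %}{{ entry.object_repr }}{% endfor %}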
| bsd-3-clause |
jreichhold/chef-repo | vendor/ruby/2.0.0/gems/nokogiri-1.6.1/ext/nokogiri/tmp/x86_64-apple-darwin13.1.0/ports/libxml2/2.8.0/libxml2-2.8.0/doc/examples/index.py | 86 | 8896 | #!/usr/bin/python -u
#
# Indexes the examples and build an XML description
#
import string
import glob
import sys
try:
import libxml2
except:
sys.exit(1)
sys.path.insert(0, "..")
from apibuild import CParser, escape
examples = []
extras = ['examples.xsl', 'index.py']
tests = []
sections = {}
symbols = {}
api_dict = None
api_doc = None
def load_api():
global api_dict
global api_doc
if api_dict != None:
return
api_dict = {}
try:
print "loading ../libxml2-api.xml"
api_doc = libxml2.parseFile("../libxml2-api.xml")
except:
print "failed to parse ../libxml2-api.xml"
sys.exit(1)
def find_symbol(name):
global api_dict
global api_doc
if api_doc == None:
load_api()
if name == None:
return
if api_dict.has_key(name):
return api_dict[name]
ctxt = api_doc.xpathNewContext()
res = ctxt.xpathEval("/api/symbols/*[@name = '%s']" % (name))
if type(res) == type([]) and len(res) >= 1:
if len(res) > 1:
print "Found %d references to %s in the API" % (len(res), name)
node = res[0]
typ = node.name
file = node.xpathEval("string(@file)")
info = node.xpathEval("string(info)")
else:
print "Reference %s not found in the API" % (name)
return None
ret = (typ, file, info)
api_dict[name] = ret
return ret
def parse_top_comment(filename, comment):
res = {}
lines = string.split(comment, "\n")
item = None
for line in lines:
while line != "" and (line[0] == ' ' or line[0] == '\t'):
line = line[1:]
while line != "" and line[0] == '*':
line = line[1:]
while line != "" and (line[0] == ' ' or line[0] == '\t'):
line = line[1:]
try:
(it, line) = string.split(line, ":", 1)
item = it
while line != "" and (line[0] == ' ' or line[0] == '\t'):
line = line[1:]
if res.has_key(item):
res[item] = res[item] + " " + line
else:
res[item] = line
except:
if item != None:
if res.has_key(item):
res[item] = res[item] + " " + line
else:
res[item] = line
return res
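# A small illustration (not from the original script) of the comment block
# parse_top_comment() expects: each "key: value" line becomes a dict entry
# once the leading whitespace and '*' are stripped, and lines without a
# colon are appended to the most recent key:
#
#     * synopsis: Parse a file to a tree and dump it
#     * purpose: Demonstrate the basics of the parser API
#     * usage: parse1 test1.xml
#
# giving {'synopsis': '...', 'purpose': '...', 'usage': '...'}.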
def parse(filename, output):
global symbols
global sections
parser = CParser(filename)
parser.collect_references()
idx = parser.parse()
info = parse_top_comment(filename, parser.top_comment)
output.write(" <example filename='%s'>\n" % filename)
try:
synopsis = info['synopsis']
output.write(" <synopsis>%s</synopsis>\n" % escape(synopsis));
except:
print "Example %s lacks a synopsis description" % (filename)
try:
purpose = info['purpose']
output.write(" <purpose>%s</purpose>\n" % escape(purpose));
except:
print "Example %s lacks a purpose description" % (filename)
try:
usage = info['usage']
output.write(" <usage>%s</usage>\n" % escape(usage));
except:
print "Example %s lacks an usage description" % (filename)
try:
test = info['test']
output.write(" <test>%s</test>\n" % escape(test));
progname=filename[0:-2]
command=string.replace(test, progname, './' + progname, 1)
tests.append(command)
except:
pass
try:
author = info['author']
output.write(" <author>%s</author>\n" % escape(author));
except:
print "Example %s lacks an author description" % (filename)
try:
copy = info['copy']
output.write(" <copy>%s</copy>\n" % escape(copy));
except:
print "Example %s lacks a copyright description" % (filename)
try:
section = info['section']
output.write(" <section>%s</section>\n" % escape(section));
if sections.has_key(section):
sections[section].append(filename)
else:
sections[section] = [filename]
except:
print "Example %s lacks a section description" % (filename)
for topic in info.keys():
if topic != "purpose" and topic != "usage" and \
topic != "author" and topic != "copy" and \
topic != "section" and topic != "synopsis" and topic != "test":
str = info[topic]
output.write(" <extra topic='%s'>%s</extra>\n" % (
escape(topic), escape(str)))
output.write(" <includes>\n")
for include in idx.includes.keys():
if include.find("libxml") != -1:
output.write(" <include>%s</include>\n" % (escape(include)))
output.write(" </includes>\n")
output.write(" <uses>\n")
for ref in idx.references.keys():
id = idx.references[ref]
name = id.get_name()
line = id.get_lineno()
if symbols.has_key(name):
sinfo = symbols[name]
refs = sinfo[0]
# gather at most 5 references per symbols
if refs > 5:
continue
sinfo.append(filename)
sinfo[0] = refs + 1
else:
symbols[name] = [1, filename]
info = find_symbol(name)
if info != None:
type = info[0]
file = info[1]
output.write(" <%s line='%d' file='%s' name='%s'/>\n" % (type,
line, file, name))
else:
type = id.get_type()
output.write(" <%s line='%d' name='%s'/>\n" % (type,
line, name))
output.write(" </uses>\n")
output.write(" </example>\n")
return idx
def dump_symbols(output):
global symbols
output.write(" <symbols>\n")
keys = symbols.keys()
keys.sort()
for symbol in keys:
output.write(" <symbol name='%s'>\n" % (symbol))
info = symbols[symbol]
i = 1
while i < len(info):
output.write(" <ref filename='%s'/>\n" % (info[i]))
i = i + 1
output.write(" </symbol>\n")
output.write(" </symbols>\n")
def dump_sections(output):
global sections
output.write(" <sections>\n")
keys = sections.keys()
keys.sort()
for section in keys:
output.write(" <section name='%s'>\n" % (section))
info = sections[section]
i = 0
while i < len(info):
output.write(" <example filename='%s'/>\n" % (info[i]))
i = i + 1
output.write(" </section>\n")
output.write(" </sections>\n")
def dump_Makefile():
for file in glob.glob('*.xml'):
extras.append(file)
for file in glob.glob('*.res'):
extras.append(file)
Makefile="""# Beware this is autogenerated by index.py
INCLUDES = -I$(top_builddir)/include -I$(top_srcdir)/include -I@srcdir@/include @THREAD_CFLAGS@ @Z_CFLAGS@
DEPS = $(top_builddir)/libxml2.la
LDADDS = @STATIC_BINARIES@ $(top_builddir)/libxml2.la @THREAD_LIBS@ @Z_LIBS@ $(ICONV_LIBS) -lm @WIN32_EXTRA_LIBADD@
rebuild: examples.xml index.html
examples.xml: index.py *.c
-@($(srcdir)/index.py)
index.html: examples.xml examples.xsl
-@(xsltproc examples.xsl examples.xml && echo "Rebuilt web page" && xmllint --valid --noout index.html)
install-data-local:
$(mkinstalldirs) $(DESTDIR)$(HTML_DIR)
-@INSTALL@ -m 0644 $(srcdir)/*.html $(srcdir)/*.c $(srcdir)/*.xml $(srcdir)/*.xsl $(srcdir)/*.res $(DESTDIR)$(HTML_DIR)
"""
EXTRA_DIST=""
for extra in extras:
EXTRA_DIST = EXTRA_DIST + extra + " "
Makefile = Makefile + "EXTRA_DIST=%s\n\n" % (EXTRA_DIST)
noinst_PROGRAMS=""
for example in examples:
noinst_PROGRAMS = noinst_PROGRAMS + example + " "
Makefile = Makefile + "noinst_PROGRAMS=%s\n\n" % (noinst_PROGRAMS)
for example in examples:
Makefile = Makefile + "%s_SOURCES=%s.c\n%s_LDFLAGS=\n%s_DEPENDENCIES= $(DEPS)\n%s_LDADD= @RDL_LIBS@ $(LDADDS)\n\n" % (example, example, example,
example, example)
Makefile = Makefile + "valgrind: \n\t$(MAKE) CHECKER='valgrind' tests\n\n"
Makefile = Makefile + "tests: $(noinst_PROGRAMS)\n"
Makefile = Makefile + "\t@(echo '## examples regression tests')\n"
Makefile = Makefile + "\t@(echo > .memdump)\n"
for test in tests:
Makefile = Makefile + "\t@($(CHECKER) %s)\n" % (test)
Makefile = Makefile + '\t@(grep "MORY ALLO" .memdump | grep -v "MEMORY ALLOCATED : 0" ; exit 0)\n'
Makefile = Makefile + "\n\n"
try:
old = open("Makefile.am", "r").read()
if old != Makefile:
n = open("Makefile.am", "w").write(Makefile)
print "Updated Makefile.am"
except:
print "Failed to read or save Makefile.am"
#
# Autogenerate the .cvsignore too ...
#
ignore = """.memdump
Makefile.in
Makefile
"""
for example in examples:
ignore = ignore + "%s\n" % (example)
try:
old = open(".cvsignore", "r").read()
if old != ignore:
n = open(".cvsignore", "w").write(ignore)
print "Updated .cvsignore"
except:
print "Failed to read or save .cvsignore"
if __name__ == "__main__":
load_api()
output = open("examples.xml", "w")
output.write("<examples>\n")
for file in glob.glob('*.c'):
parse(file, output)
examples.append(file[:-2])
dump_symbols(output)
dump_sections(output)
output.write("</examples>\n")
output.close()
dump_Makefile()
| apache-2.0 |
rosswhitfield/mantid | qt/python/mantidqt/widgets/sliceviewer/peaksviewer/test/test_peaksviewer_presenter.py | 3 | 5480 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2020 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
# std imports
import unittest
from unittest.mock import create_autospec, patch, ANY, MagicMock
# 3rdparty imports
from mantid.dataobjects import PeaksWorkspace
from mantidqt.widgets.sliceviewer.peaksviewer import PeaksViewerModel
from mantidqt.widgets.sliceviewer.peaksviewer.presenter \
import PeaksViewerPresenter, PeaksWorkspaceDataPresenter
from mantidqt.widgets.sliceviewer.peaksviewer.view \
import PeaksViewerView, _PeaksWorkspaceTableView
from mantidqt.widgets.sliceviewer.peaksviewer.test.modeltesthelpers\
import create_peaks_viewer_model, create_slice_info # noqa
def create_test_model(name):
"""Create a test model object from a mock workspace"""
mock_ws = create_autospec(PeaksWorkspace)
mock_ws.name.return_value = name
return PeaksViewerModel(mock_ws, 'r', 'b')
def create_mock_model(name):
"""Create a mock object to test calss"""
mock_ws = create_autospec(PeaksWorkspace)
mock_ws.name.return_value = name
mock = create_autospec(PeaksViewerModel)
return mock
def create_mock_view():
mock_view = create_autospec(PeaksViewerView)
mock_view.table_view = create_autospec(_PeaksWorkspaceTableView)
return mock_view
@patch("mantidqt.widgets.sliceviewer.peaksviewer.presenter.PeaksWorkspaceDataPresenter",
autospec=PeaksWorkspaceDataPresenter)
class PeaksViewerPresenterTest(unittest.TestCase):
def setUp(self):
self.mock_view = create_mock_view()
# -------------------- success tests -----------------------------
def test_presenter_subscribes_to_view_updates(self, _):
presenter = PeaksViewerPresenter(create_peaks_viewer_model([], 'r'), self.mock_view)
self.mock_view.subscribe.assert_called_once_with(presenter)
def test_view_populated_on_presenter_construction(self, mock_peaks_list_presenter):
name = "ws1"
fg_color = "r"
test_model = create_peaks_viewer_model([(1, 2, 3)], fg_color=fg_color, name=name)
PeaksViewerPresenter(test_model, self.mock_view)
self.mock_view.set_title.assert_called_once_with(name)
self.mock_view.set_peak_color.assert_called_once_with(fg_color)
# peaks list presenter construction
mock_peaks_list_presenter.assert_called_once_with(ANY, self.mock_view.table_view)
self.mock_view.table_view.enable_sorting.assert_called_once()
def test_clear_removes_painted_peaks(self, mock_peaks_list_presenter):
centers = ((1, 2, 3), (4, 5, 3.01))
slice_info = create_slice_info(centers, slice_value=3, slice_width=5)
test_model = create_peaks_viewer_model(centers, fg_color="r")
painter, axes = MagicMock(), MagicMock()
axes.get_xlim.return_value = (-1, 1)
painter.axes = axes
test_model.draw_peaks(slice_info, painter)
self.mock_view.painter = painter
presenter = PeaksViewerPresenter(test_model, self.mock_view)
presenter.notify(PeaksViewerPresenter.Event.ClearPeaks)
self.assertEqual(2, self.mock_view.painter.remove.call_count)
def test_slice_point_changed_clears_old_peaks_and_overlays_visible(
self, mock_peaks_list_presenter):
centers = ((1, 2, 3), (4, 5, 3.01))
slice_info = create_slice_info(centers, slice_value=3, slice_width=5)
test_model = create_peaks_viewer_model(centers, fg_color="r")
# draw some peaks first so we can test clearing them
painter, axes = MagicMock(), MagicMock()
axes.get_xlim.return_value = (-1, 1)
painter.axes = axes
test_model.draw_peaks(slice_info, painter)
# clear draw calls
painter.cross.reset_mock()
self.mock_view.painter = painter
self.mock_view.sliceinfo = create_slice_info(centers, slice_value=3, slice_width=5)
presenter = PeaksViewerPresenter(test_model, self.mock_view)
presenter.notify(PeaksViewerPresenter.Event.SlicePointChanged)
self.assertEqual(2, self.mock_view.painter.remove.call_count)
self.assertEqual(2, self.mock_view.painter.cross.call_count)
def test_single_peak_selection(self, mock_peaks_list_presenter):
name = 'ws1'
mock_model = create_mock_model(name)
viewlimits = (-1, 1), (-2, 2)
mock_model.viewlimits.return_value = viewlimits
self.mock_view.selected_index = 0
presenter = PeaksViewerPresenter(mock_model, self.mock_view)
presenter.notify(PeaksViewerPresenter.Event.PeakSelected)
mock_model.viewlimits.assert_called_once_with(0)
self.mock_view.set_axes_limits.assert_called_once_with(*viewlimits, auto_transform=False)
def test_add_delete_peaks(self, mock_peaks_list_presenter):
name = 'ws1'
mock_model = create_mock_model(name)
self.mock_view.sliceinfo.frame = 'Frame'
presenter = PeaksViewerPresenter(mock_model, self.mock_view)
presenter.add_peak([1, 2, 3])
mock_model.add_peak.assert_called_once_with([1, 2, 3], 'Frame')
presenter.delete_peak([1, 2, 3])
mock_model.delete_peak.assert_called_once_with([1, 2, 3], 'Frame')
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
moijes12/oh-mainline | vendor/packages/Django/django/template/loaders/eggs.py | 114 | 1209 | # Wrapper for loading templates from eggs via pkg_resources.resource_string.
from __future__ import unicode_literals
try:
from pkg_resources import resource_string
except ImportError:
resource_string = None
from django.conf import settings
from django.template.base import TemplateDoesNotExist
from django.template.loader import BaseLoader
from django.utils import six
class Loader(BaseLoader):
is_usable = resource_string is not None
def load_template_source(self, template_name, template_dirs=None):
"""
Loads templates from Python eggs via pkg_resource.resource_string.
For every installed app, it tries to get the resource (app, template_name).
"""
if resource_string is not None:
pkg_name = 'templates/' + template_name
for app in settings.INSTALLED_APPS:
try:
resource = resource_string(app, pkg_name)
except Exception:
continue
if not six.PY3:
resource = resource.decode(settings.FILE_CHARSET)
return (resource, 'egg:%s:%s' % (app, pkg_name))
raise TemplateDoesNotExist(template_name)
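# A minimal standalone sketch (not part of Django) of the pkg_resources call
# this loader builds on; 'django.contrib.admin' is simply a package known to
# ship templates, and the template path is illustrative:
if __name__ == '__main__':
    if resource_string is not None:
        data = resource_string('django.contrib.admin', 'templates/admin/404.html')
        print('loaded %d bytes' % len(data))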
| agpl-3.0 |
qenter/vlc-android | toolchains/arm/lib/python2.7/stringprep.py | 278 | 13522 | # This file is generated by mkstringprep.py. DO NOT EDIT.
"""Library that exposes various tables found in the StringPrep RFC 3454.
There are two kinds of tables: sets, for which a member test is provided,
and mappings, for which a mapping function is provided.
"""
from unicodedata import ucd_3_2_0 as unicodedata
assert unicodedata.unidata_version == '3.2.0'
def in_table_a1(code):
if unicodedata.category(code) != 'Cn': return False
c = ord(code)
if 0xFDD0 <= c < 0xFDF0: return False
return (c & 0xFFFF) not in (0xFFFE, 0xFFFF)
b1_set = set([173, 847, 6150, 6155, 6156, 6157, 8203, 8204, 8205, 8288, 65279] + range(65024,65040))
def in_table_b1(code):
return ord(code) in b1_set
b3_exceptions = {
0xb5:u'\u03bc', 0xdf:u'ss', 0x130:u'i\u0307', 0x149:u'\u02bcn',
0x17f:u's', 0x1f0:u'j\u030c', 0x345:u'\u03b9', 0x37a:u' \u03b9',
0x390:u'\u03b9\u0308\u0301', 0x3b0:u'\u03c5\u0308\u0301', 0x3c2:u'\u03c3', 0x3d0:u'\u03b2',
0x3d1:u'\u03b8', 0x3d2:u'\u03c5', 0x3d3:u'\u03cd', 0x3d4:u'\u03cb',
0x3d5:u'\u03c6', 0x3d6:u'\u03c0', 0x3f0:u'\u03ba', 0x3f1:u'\u03c1',
0x3f2:u'\u03c3', 0x3f5:u'\u03b5', 0x587:u'\u0565\u0582', 0x1e96:u'h\u0331',
0x1e97:u't\u0308', 0x1e98:u'w\u030a', 0x1e99:u'y\u030a', 0x1e9a:u'a\u02be',
0x1e9b:u'\u1e61', 0x1f50:u'\u03c5\u0313', 0x1f52:u'\u03c5\u0313\u0300', 0x1f54:u'\u03c5\u0313\u0301',
0x1f56:u'\u03c5\u0313\u0342', 0x1f80:u'\u1f00\u03b9', 0x1f81:u'\u1f01\u03b9', 0x1f82:u'\u1f02\u03b9',
0x1f83:u'\u1f03\u03b9', 0x1f84:u'\u1f04\u03b9', 0x1f85:u'\u1f05\u03b9', 0x1f86:u'\u1f06\u03b9',
0x1f87:u'\u1f07\u03b9', 0x1f88:u'\u1f00\u03b9', 0x1f89:u'\u1f01\u03b9', 0x1f8a:u'\u1f02\u03b9',
0x1f8b:u'\u1f03\u03b9', 0x1f8c:u'\u1f04\u03b9', 0x1f8d:u'\u1f05\u03b9', 0x1f8e:u'\u1f06\u03b9',
0x1f8f:u'\u1f07\u03b9', 0x1f90:u'\u1f20\u03b9', 0x1f91:u'\u1f21\u03b9', 0x1f92:u'\u1f22\u03b9',
0x1f93:u'\u1f23\u03b9', 0x1f94:u'\u1f24\u03b9', 0x1f95:u'\u1f25\u03b9', 0x1f96:u'\u1f26\u03b9',
0x1f97:u'\u1f27\u03b9', 0x1f98:u'\u1f20\u03b9', 0x1f99:u'\u1f21\u03b9', 0x1f9a:u'\u1f22\u03b9',
0x1f9b:u'\u1f23\u03b9', 0x1f9c:u'\u1f24\u03b9', 0x1f9d:u'\u1f25\u03b9', 0x1f9e:u'\u1f26\u03b9',
0x1f9f:u'\u1f27\u03b9', 0x1fa0:u'\u1f60\u03b9', 0x1fa1:u'\u1f61\u03b9', 0x1fa2:u'\u1f62\u03b9',
0x1fa3:u'\u1f63\u03b9', 0x1fa4:u'\u1f64\u03b9', 0x1fa5:u'\u1f65\u03b9', 0x1fa6:u'\u1f66\u03b9',
0x1fa7:u'\u1f67\u03b9', 0x1fa8:u'\u1f60\u03b9', 0x1fa9:u'\u1f61\u03b9', 0x1faa:u'\u1f62\u03b9',
0x1fab:u'\u1f63\u03b9', 0x1fac:u'\u1f64\u03b9', 0x1fad:u'\u1f65\u03b9', 0x1fae:u'\u1f66\u03b9',
0x1faf:u'\u1f67\u03b9', 0x1fb2:u'\u1f70\u03b9', 0x1fb3:u'\u03b1\u03b9', 0x1fb4:u'\u03ac\u03b9',
0x1fb6:u'\u03b1\u0342', 0x1fb7:u'\u03b1\u0342\u03b9', 0x1fbc:u'\u03b1\u03b9', 0x1fbe:u'\u03b9',
0x1fc2:u'\u1f74\u03b9', 0x1fc3:u'\u03b7\u03b9', 0x1fc4:u'\u03ae\u03b9', 0x1fc6:u'\u03b7\u0342',
0x1fc7:u'\u03b7\u0342\u03b9', 0x1fcc:u'\u03b7\u03b9', 0x1fd2:u'\u03b9\u0308\u0300', 0x1fd3:u'\u03b9\u0308\u0301',
0x1fd6:u'\u03b9\u0342', 0x1fd7:u'\u03b9\u0308\u0342', 0x1fe2:u'\u03c5\u0308\u0300', 0x1fe3:u'\u03c5\u0308\u0301',
0x1fe4:u'\u03c1\u0313', 0x1fe6:u'\u03c5\u0342', 0x1fe7:u'\u03c5\u0308\u0342', 0x1ff2:u'\u1f7c\u03b9',
0x1ff3:u'\u03c9\u03b9', 0x1ff4:u'\u03ce\u03b9', 0x1ff6:u'\u03c9\u0342', 0x1ff7:u'\u03c9\u0342\u03b9',
0x1ffc:u'\u03c9\u03b9', 0x20a8:u'rs', 0x2102:u'c', 0x2103:u'\xb0c',
0x2107:u'\u025b', 0x2109:u'\xb0f', 0x210b:u'h', 0x210c:u'h',
0x210d:u'h', 0x2110:u'i', 0x2111:u'i', 0x2112:u'l',
0x2115:u'n', 0x2116:u'no', 0x2119:u'p', 0x211a:u'q',
0x211b:u'r', 0x211c:u'r', 0x211d:u'r', 0x2120:u'sm',
0x2121:u'tel', 0x2122:u'tm', 0x2124:u'z', 0x2128:u'z',
0x212c:u'b', 0x212d:u'c', 0x2130:u'e', 0x2131:u'f',
0x2133:u'm', 0x213e:u'\u03b3', 0x213f:u'\u03c0', 0x2145:u'd',
0x3371:u'hpa', 0x3373:u'au', 0x3375:u'ov', 0x3380:u'pa',
0x3381:u'na', 0x3382:u'\u03bca', 0x3383:u'ma', 0x3384:u'ka',
0x3385:u'kb', 0x3386:u'mb', 0x3387:u'gb', 0x338a:u'pf',
0x338b:u'nf', 0x338c:u'\u03bcf', 0x3390:u'hz', 0x3391:u'khz',
0x3392:u'mhz', 0x3393:u'ghz', 0x3394:u'thz', 0x33a9:u'pa',
0x33aa:u'kpa', 0x33ab:u'mpa', 0x33ac:u'gpa', 0x33b4:u'pv',
0x33b5:u'nv', 0x33b6:u'\u03bcv', 0x33b7:u'mv', 0x33b8:u'kv',
0x33b9:u'mv', 0x33ba:u'pw', 0x33bb:u'nw', 0x33bc:u'\u03bcw',
0x33bd:u'mw', 0x33be:u'kw', 0x33bf:u'mw', 0x33c0:u'k\u03c9',
0x33c1:u'm\u03c9', 0x33c3:u'bq', 0x33c6:u'c\u2215kg', 0x33c7:u'co.',
0x33c8:u'db', 0x33c9:u'gy', 0x33cb:u'hp', 0x33cd:u'kk',
0x33ce:u'km', 0x33d7:u'ph', 0x33d9:u'ppm', 0x33da:u'pr',
0x33dc:u'sv', 0x33dd:u'wb', 0xfb00:u'ff', 0xfb01:u'fi',
0xfb02:u'fl', 0xfb03:u'ffi', 0xfb04:u'ffl', 0xfb05:u'st',
0xfb06:u'st', 0xfb13:u'\u0574\u0576', 0xfb14:u'\u0574\u0565', 0xfb15:u'\u0574\u056b',
0xfb16:u'\u057e\u0576', 0xfb17:u'\u0574\u056d', 0x1d400:u'a', 0x1d401:u'b',
0x1d402:u'c', 0x1d403:u'd', 0x1d404:u'e', 0x1d405:u'f',
0x1d406:u'g', 0x1d407:u'h', 0x1d408:u'i', 0x1d409:u'j',
0x1d40a:u'k', 0x1d40b:u'l', 0x1d40c:u'm', 0x1d40d:u'n',
0x1d40e:u'o', 0x1d40f:u'p', 0x1d410:u'q', 0x1d411:u'r',
0x1d412:u's', 0x1d413:u't', 0x1d414:u'u', 0x1d415:u'v',
0x1d416:u'w', 0x1d417:u'x', 0x1d418:u'y', 0x1d419:u'z',
0x1d434:u'a', 0x1d435:u'b', 0x1d436:u'c', 0x1d437:u'd',
0x1d438:u'e', 0x1d439:u'f', 0x1d43a:u'g', 0x1d43b:u'h',
0x1d43c:u'i', 0x1d43d:u'j', 0x1d43e:u'k', 0x1d43f:u'l',
0x1d440:u'm', 0x1d441:u'n', 0x1d442:u'o', 0x1d443:u'p',
0x1d444:u'q', 0x1d445:u'r', 0x1d446:u's', 0x1d447:u't',
0x1d448:u'u', 0x1d449:u'v', 0x1d44a:u'w', 0x1d44b:u'x',
0x1d44c:u'y', 0x1d44d:u'z', 0x1d468:u'a', 0x1d469:u'b',
0x1d46a:u'c', 0x1d46b:u'd', 0x1d46c:u'e', 0x1d46d:u'f',
0x1d46e:u'g', 0x1d46f:u'h', 0x1d470:u'i', 0x1d471:u'j',
0x1d472:u'k', 0x1d473:u'l', 0x1d474:u'm', 0x1d475:u'n',
0x1d476:u'o', 0x1d477:u'p', 0x1d478:u'q', 0x1d479:u'r',
0x1d47a:u's', 0x1d47b:u't', 0x1d47c:u'u', 0x1d47d:u'v',
0x1d47e:u'w', 0x1d47f:u'x', 0x1d480:u'y', 0x1d481:u'z',
0x1d49c:u'a', 0x1d49e:u'c', 0x1d49f:u'd', 0x1d4a2:u'g',
0x1d4a5:u'j', 0x1d4a6:u'k', 0x1d4a9:u'n', 0x1d4aa:u'o',
0x1d4ab:u'p', 0x1d4ac:u'q', 0x1d4ae:u's', 0x1d4af:u't',
0x1d4b0:u'u', 0x1d4b1:u'v', 0x1d4b2:u'w', 0x1d4b3:u'x',
0x1d4b4:u'y', 0x1d4b5:u'z', 0x1d4d0:u'a', 0x1d4d1:u'b',
0x1d4d2:u'c', 0x1d4d3:u'd', 0x1d4d4:u'e', 0x1d4d5:u'f',
0x1d4d6:u'g', 0x1d4d7:u'h', 0x1d4d8:u'i', 0x1d4d9:u'j',
0x1d4da:u'k', 0x1d4db:u'l', 0x1d4dc:u'm', 0x1d4dd:u'n',
0x1d4de:u'o', 0x1d4df:u'p', 0x1d4e0:u'q', 0x1d4e1:u'r',
0x1d4e2:u's', 0x1d4e3:u't', 0x1d4e4:u'u', 0x1d4e5:u'v',
0x1d4e6:u'w', 0x1d4e7:u'x', 0x1d4e8:u'y', 0x1d4e9:u'z',
0x1d504:u'a', 0x1d505:u'b', 0x1d507:u'd', 0x1d508:u'e',
0x1d509:u'f', 0x1d50a:u'g', 0x1d50d:u'j', 0x1d50e:u'k',
0x1d50f:u'l', 0x1d510:u'm', 0x1d511:u'n', 0x1d512:u'o',
0x1d513:u'p', 0x1d514:u'q', 0x1d516:u's', 0x1d517:u't',
0x1d518:u'u', 0x1d519:u'v', 0x1d51a:u'w', 0x1d51b:u'x',
0x1d51c:u'y', 0x1d538:u'a', 0x1d539:u'b', 0x1d53b:u'd',
0x1d53c:u'e', 0x1d53d:u'f', 0x1d53e:u'g', 0x1d540:u'i',
0x1d541:u'j', 0x1d542:u'k', 0x1d543:u'l', 0x1d544:u'm',
0x1d546:u'o', 0x1d54a:u's', 0x1d54b:u't', 0x1d54c:u'u',
0x1d54d:u'v', 0x1d54e:u'w', 0x1d54f:u'x', 0x1d550:u'y',
0x1d56c:u'a', 0x1d56d:u'b', 0x1d56e:u'c', 0x1d56f:u'd',
0x1d570:u'e', 0x1d571:u'f', 0x1d572:u'g', 0x1d573:u'h',
0x1d574:u'i', 0x1d575:u'j', 0x1d576:u'k', 0x1d577:u'l',
0x1d578:u'm', 0x1d579:u'n', 0x1d57a:u'o', 0x1d57b:u'p',
0x1d57c:u'q', 0x1d57d:u'r', 0x1d57e:u's', 0x1d57f:u't',
0x1d580:u'u', 0x1d581:u'v', 0x1d582:u'w', 0x1d583:u'x',
0x1d584:u'y', 0x1d585:u'z', 0x1d5a0:u'a', 0x1d5a1:u'b',
0x1d5a2:u'c', 0x1d5a3:u'd', 0x1d5a4:u'e', 0x1d5a5:u'f',
0x1d5a6:u'g', 0x1d5a7:u'h', 0x1d5a8:u'i', 0x1d5a9:u'j',
0x1d5aa:u'k', 0x1d5ab:u'l', 0x1d5ac:u'm', 0x1d5ad:u'n',
0x1d5ae:u'o', 0x1d5af:u'p', 0x1d5b0:u'q', 0x1d5b1:u'r',
0x1d5b2:u's', 0x1d5b3:u't', 0x1d5b4:u'u', 0x1d5b5:u'v',
0x1d5b6:u'w', 0x1d5b7:u'x', 0x1d5b8:u'y', 0x1d5b9:u'z',
0x1d5d4:u'a', 0x1d5d5:u'b', 0x1d5d6:u'c', 0x1d5d7:u'd',
0x1d5d8:u'e', 0x1d5d9:u'f', 0x1d5da:u'g', 0x1d5db:u'h',
0x1d5dc:u'i', 0x1d5dd:u'j', 0x1d5de:u'k', 0x1d5df:u'l',
0x1d5e0:u'm', 0x1d5e1:u'n', 0x1d5e2:u'o', 0x1d5e3:u'p',
0x1d5e4:u'q', 0x1d5e5:u'r', 0x1d5e6:u's', 0x1d5e7:u't',
0x1d5e8:u'u', 0x1d5e9:u'v', 0x1d5ea:u'w', 0x1d5eb:u'x',
0x1d5ec:u'y', 0x1d5ed:u'z', 0x1d608:u'a', 0x1d609:u'b',
0x1d60a:u'c', 0x1d60b:u'd', 0x1d60c:u'e', 0x1d60d:u'f',
0x1d60e:u'g', 0x1d60f:u'h', 0x1d610:u'i', 0x1d611:u'j',
0x1d612:u'k', 0x1d613:u'l', 0x1d614:u'm', 0x1d615:u'n',
0x1d616:u'o', 0x1d617:u'p', 0x1d618:u'q', 0x1d619:u'r',
0x1d61a:u's', 0x1d61b:u't', 0x1d61c:u'u', 0x1d61d:u'v',
0x1d61e:u'w', 0x1d61f:u'x', 0x1d620:u'y', 0x1d621:u'z',
0x1d63c:u'a', 0x1d63d:u'b', 0x1d63e:u'c', 0x1d63f:u'd',
0x1d640:u'e', 0x1d641:u'f', 0x1d642:u'g', 0x1d643:u'h',
0x1d644:u'i', 0x1d645:u'j', 0x1d646:u'k', 0x1d647:u'l',
0x1d648:u'm', 0x1d649:u'n', 0x1d64a:u'o', 0x1d64b:u'p',
0x1d64c:u'q', 0x1d64d:u'r', 0x1d64e:u's', 0x1d64f:u't',
0x1d650:u'u', 0x1d651:u'v', 0x1d652:u'w', 0x1d653:u'x',
0x1d654:u'y', 0x1d655:u'z', 0x1d670:u'a', 0x1d671:u'b',
0x1d672:u'c', 0x1d673:u'd', 0x1d674:u'e', 0x1d675:u'f',
0x1d676:u'g', 0x1d677:u'h', 0x1d678:u'i', 0x1d679:u'j',
0x1d67a:u'k', 0x1d67b:u'l', 0x1d67c:u'm', 0x1d67d:u'n',
0x1d67e:u'o', 0x1d67f:u'p', 0x1d680:u'q', 0x1d681:u'r',
0x1d682:u's', 0x1d683:u't', 0x1d684:u'u', 0x1d685:u'v',
0x1d686:u'w', 0x1d687:u'x', 0x1d688:u'y', 0x1d689:u'z',
0x1d6a8:u'\u03b1', 0x1d6a9:u'\u03b2', 0x1d6aa:u'\u03b3', 0x1d6ab:u'\u03b4',
0x1d6ac:u'\u03b5', 0x1d6ad:u'\u03b6', 0x1d6ae:u'\u03b7', 0x1d6af:u'\u03b8',
0x1d6b0:u'\u03b9', 0x1d6b1:u'\u03ba', 0x1d6b2:u'\u03bb', 0x1d6b3:u'\u03bc',
0x1d6b4:u'\u03bd', 0x1d6b5:u'\u03be', 0x1d6b6:u'\u03bf', 0x1d6b7:u'\u03c0',
0x1d6b8:u'\u03c1', 0x1d6b9:u'\u03b8', 0x1d6ba:u'\u03c3', 0x1d6bb:u'\u03c4',
0x1d6bc:u'\u03c5', 0x1d6bd:u'\u03c6', 0x1d6be:u'\u03c7', 0x1d6bf:u'\u03c8',
0x1d6c0:u'\u03c9', 0x1d6d3:u'\u03c3', 0x1d6e2:u'\u03b1', 0x1d6e3:u'\u03b2',
0x1d6e4:u'\u03b3', 0x1d6e5:u'\u03b4', 0x1d6e6:u'\u03b5', 0x1d6e7:u'\u03b6',
0x1d6e8:u'\u03b7', 0x1d6e9:u'\u03b8', 0x1d6ea:u'\u03b9', 0x1d6eb:u'\u03ba',
0x1d6ec:u'\u03bb', 0x1d6ed:u'\u03bc', 0x1d6ee:u'\u03bd', 0x1d6ef:u'\u03be',
0x1d6f0:u'\u03bf', 0x1d6f1:u'\u03c0', 0x1d6f2:u'\u03c1', 0x1d6f3:u'\u03b8',
0x1d6f4:u'\u03c3', 0x1d6f5:u'\u03c4', 0x1d6f6:u'\u03c5', 0x1d6f7:u'\u03c6',
0x1d6f8:u'\u03c7', 0x1d6f9:u'\u03c8', 0x1d6fa:u'\u03c9', 0x1d70d:u'\u03c3',
0x1d71c:u'\u03b1', 0x1d71d:u'\u03b2', 0x1d71e:u'\u03b3', 0x1d71f:u'\u03b4',
0x1d720:u'\u03b5', 0x1d721:u'\u03b6', 0x1d722:u'\u03b7', 0x1d723:u'\u03b8',
0x1d724:u'\u03b9', 0x1d725:u'\u03ba', 0x1d726:u'\u03bb', 0x1d727:u'\u03bc',
0x1d728:u'\u03bd', 0x1d729:u'\u03be', 0x1d72a:u'\u03bf', 0x1d72b:u'\u03c0',
0x1d72c:u'\u03c1', 0x1d72d:u'\u03b8', 0x1d72e:u'\u03c3', 0x1d72f:u'\u03c4',
0x1d730:u'\u03c5', 0x1d731:u'\u03c6', 0x1d732:u'\u03c7', 0x1d733:u'\u03c8',
0x1d734:u'\u03c9', 0x1d747:u'\u03c3', 0x1d756:u'\u03b1', 0x1d757:u'\u03b2',
0x1d758:u'\u03b3', 0x1d759:u'\u03b4', 0x1d75a:u'\u03b5', 0x1d75b:u'\u03b6',
0x1d75c:u'\u03b7', 0x1d75d:u'\u03b8', 0x1d75e:u'\u03b9', 0x1d75f:u'\u03ba',
0x1d760:u'\u03bb', 0x1d761:u'\u03bc', 0x1d762:u'\u03bd', 0x1d763:u'\u03be',
0x1d764:u'\u03bf', 0x1d765:u'\u03c0', 0x1d766:u'\u03c1', 0x1d767:u'\u03b8',
0x1d768:u'\u03c3', 0x1d769:u'\u03c4', 0x1d76a:u'\u03c5', 0x1d76b:u'\u03c6',
0x1d76c:u'\u03c7', 0x1d76d:u'\u03c8', 0x1d76e:u'\u03c9', 0x1d781:u'\u03c3',
0x1d790:u'\u03b1', 0x1d791:u'\u03b2', 0x1d792:u'\u03b3', 0x1d793:u'\u03b4',
0x1d794:u'\u03b5', 0x1d795:u'\u03b6', 0x1d796:u'\u03b7', 0x1d797:u'\u03b8',
0x1d798:u'\u03b9', 0x1d799:u'\u03ba', 0x1d79a:u'\u03bb', 0x1d79b:u'\u03bc',
0x1d79c:u'\u03bd', 0x1d79d:u'\u03be', 0x1d79e:u'\u03bf', 0x1d79f:u'\u03c0',
0x1d7a0:u'\u03c1', 0x1d7a1:u'\u03b8', 0x1d7a2:u'\u03c3', 0x1d7a3:u'\u03c4',
0x1d7a4:u'\u03c5', 0x1d7a5:u'\u03c6', 0x1d7a6:u'\u03c7', 0x1d7a7:u'\u03c8',
0x1d7a8:u'\u03c9', 0x1d7bb:u'\u03c3', }
def map_table_b3(code):
r = b3_exceptions.get(ord(code))
if r is not None: return r
return code.lower()
def map_table_b2(a):
al = map_table_b3(a)
b = unicodedata.normalize("NFKC", al)
bl = u"".join([map_table_b3(ch) for ch in b])
c = unicodedata.normalize("NFKC", bl)
if b != c:
return c
else:
return al
def in_table_c11(code):
return code == u" "
def in_table_c12(code):
return unicodedata.category(code) == "Zs" and code != u" "
def in_table_c11_c12(code):
return unicodedata.category(code) == "Zs"
def in_table_c21(code):
return ord(code) < 128 and unicodedata.category(code) == "Cc"
c22_specials = set([1757, 1807, 6158, 8204, 8205, 8232, 8233, 65279] + range(8288,8292) + range(8298,8304) + range(65529,65533) + range(119155,119163))
def in_table_c22(code):
c = ord(code)
if c < 128: return False
if unicodedata.category(code) == "Cc": return True
return c in c22_specials
def in_table_c21_c22(code):
return unicodedata.category(code) == "Cc" or \
ord(code) in c22_specials
def in_table_c3(code):
return unicodedata.category(code) == "Co"
def in_table_c4(code):
c = ord(code)
if c < 0xFDD0: return False
if c < 0xFDF0: return True
return (ord(code) & 0xFFFF) in (0xFFFE, 0xFFFF)
def in_table_c5(code):
return unicodedata.category(code) == "Cs"
c6_set = set(range(65529,65534))
def in_table_c6(code):
return ord(code) in c6_set
c7_set = set(range(12272,12284))
def in_table_c7(code):
return ord(code) in c7_set
c8_set = set([832, 833, 8206, 8207] + range(8234,8239) + range(8298,8304))
def in_table_c8(code):
return ord(code) in c8_set
c9_set = set([917505] + range(917536,917632))
def in_table_c9(code):
return ord(code) in c9_set
def in_table_d1(code):
return unicodedata.bidirectional(code) in ("R","AL")
def in_table_d2(code):
return unicodedata.bidirectional(code) == "L"
| gpl-2.0 |
cheukyin699/linux | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
| gpl-2.0 |
yuifan/pexus4_external_gtest | test/gtest_throw_on_failure_test.py | 2917 | 5766 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's throw-on-failure mode with exceptions disabled.
This script invokes gtest_throw_on_failure_test_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE = 'gtest_throw_on_failure'
# Path to the gtest_throw_on_failure_test_ program, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_throw_on_failure_test_')
# Utilities.
def SetEnvVar(env_var, value):
"""Sets an environment variable to a given value; unsets it when the
given value is None.
"""
env_var = env_var.upper()
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def Run(command):
"""Runs a command; returns True/False if its exit code is/isn't 0."""
print 'Running "%s". . .' % ' '.join(command)
p = gtest_test_utils.Subprocess(command)
return p.exited and p.exit_code == 0
# The tests. TODO(wan@google.com): refactor the class to share common
# logic with code in gtest_break_on_failure_unittest.py.
class ThrowOnFailureTest(gtest_test_utils.TestCase):
"""Tests the throw-on-failure mode."""
def RunAndVerify(self, env_var_value, flag_value, should_fail):
"""Runs gtest_throw_on_failure_test_ and verifies that it does
(or does not) exit with a non-zero code.
Args:
env_var_value: value of the GTEST_THROW_ON_FAILURE environment
variable; None if the variable should be unset.
flag_value: value of the --gtest_throw_on_failure flag;
None if the flag should not be present.
should_fail: True iff the program is expected to fail.
"""
SetEnvVar(THROW_ON_FAILURE, env_var_value)
if env_var_value is None:
env_var_value_msg = ' is not set'
else:
env_var_value_msg = '=' + env_var_value
if flag_value is None:
flag = ''
elif flag_value == '0':
flag = '--%s=0' % THROW_ON_FAILURE
else:
flag = '--%s' % THROW_ON_FAILURE
command = [EXE_PATH]
if flag:
command.append(flag)
if should_fail:
should_or_not = 'should'
else:
should_or_not = 'should not'
failed = not Run(command)
SetEnvVar(THROW_ON_FAILURE, None)
msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero '
'exit code.' %
(THROW_ON_FAILURE, env_var_value_msg, ' '.join(command),
should_or_not))
self.assert_(failed == should_fail, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False)
def testThrowOnFailureEnvVar(self):
"""Tests using the GTEST_THROW_ON_FAILURE environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value=None,
should_fail=False)
self.RunAndVerify(env_var_value='1',
flag_value=None,
should_fail=True)
def testThrowOnFailureFlag(self):
"""Tests using the --gtest_throw_on_failure flag."""
self.RunAndVerify(env_var_value=None,
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value=None,
flag_value='1',
should_fail=True)
def testThrowOnFailureFlagOverridesEnvVar(self):
"""Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE."""
self.RunAndVerify(env_var_value='0',
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value='0',
flag_value='1',
should_fail=True)
self.RunAndVerify(env_var_value='1',
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value='1',
flag_value='1',
should_fail=True)
if __name__ == '__main__':
gtest_test_utils.Main()
| bsd-3-clause |
geekaia/edx-platform | common/lib/xmodule/xmodule/tests/test_annotatable_module.py | 27 | 5778 | """Module annotatable tests"""
import unittest
from lxml import etree
from mock import Mock
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xmodule.annotatable_module import AnnotatableModule
from opaque_keys.edx.locations import Location
from . import get_test_system
class AnnotatableModuleTestCase(unittest.TestCase):
sample_xml = '''
<annotatable display_name="Iliad">
<instructions>Read the text.</instructions>
<p>
<annotation body="first">Sing</annotation>,
<annotation title="goddess" body="second">O goddess</annotation>,
<annotation title="anger" body="third" highlight="blue">the anger of Achilles son of Peleus</annotation>,
that brought <i>countless</i> ills upon the Achaeans. Many a brave soul did it send
hurrying down to Hades, and many a hero did it yield a prey to dogs and
<div style="font-weight:bold"><annotation body="fourth" problem="4">vultures</annotation>, for so were the counsels
of Jove fulfilled from the day on which the son of Atreus, king of men, and great
Achilles, first fell out with one another.</div>
</p>
<annotation title="footnote" body="the end">The Iliad of Homer by Samuel Butler</annotation>
</annotatable>
'''
def setUp(self):
self.annotatable = AnnotatableModule(
Mock(),
get_test_system(),
DictFieldData({'data': self.sample_xml}),
ScopeIds(None, None, None, Location('org', 'course', 'run', 'category', 'name', None))
)
def test_annotation_data_attr(self):
el = etree.fromstring('<annotation title="bar" body="foo" problem="0">test</annotation>')
expected_attr = {
'data-comment-body': {'value': 'foo', '_delete': 'body' },
'data-comment-title': {'value': 'bar', '_delete': 'title'},
'data-problem-id': {'value': '0', '_delete': 'problem'}
}
actual_attr = self.annotatable._get_annotation_data_attr(0, el)
self.assertIsInstance(actual_attr, dict)
self.assertDictEqual(expected_attr, actual_attr)
def test_annotation_class_attr_default(self):
xml = '<annotation title="x" body="y" problem="0">test</annotation>'
el = etree.fromstring(xml)
expected_attr = { 'class': { 'value': 'annotatable-span highlight' } }
actual_attr = self.annotatable._get_annotation_class_attr(0, el)
self.assertIsInstance(actual_attr, dict)
self.assertDictEqual(expected_attr, actual_attr)
def test_annotation_class_attr_with_valid_highlight(self):
xml = '<annotation title="x" body="y" problem="0" highlight="{highlight}">test</annotation>'
for color in self.annotatable.highlight_colors:
el = etree.fromstring(xml.format(highlight=color))
value = 'annotatable-span highlight highlight-{highlight}'.format(highlight=color)
expected_attr = { 'class': {
'value': value,
'_delete': 'highlight' }
}
actual_attr = self.annotatable._get_annotation_class_attr(0, el)
self.assertIsInstance(actual_attr, dict)
self.assertDictEqual(expected_attr, actual_attr)
def test_annotation_class_attr_with_invalid_highlight(self):
xml = '<annotation title="x" body="y" problem="0" highlight="{highlight}">test</annotation>'
for invalid_color in ['rainbow', 'blink', 'invisible', '', None]:
el = etree.fromstring(xml.format(highlight=invalid_color))
expected_attr = { 'class': {
'value': 'annotatable-span highlight',
'_delete': 'highlight' }
}
actual_attr = self.annotatable._get_annotation_class_attr(0, el)
self.assertIsInstance(actual_attr, dict)
self.assertDictEqual(expected_attr, actual_attr)
def test_render_annotation(self):
expected_html = '<span class="annotatable-span highlight highlight-yellow" data-comment-title="x" data-comment-body="y" data-problem-id="0">z</span>'
expected_el = etree.fromstring(expected_html)
actual_el = etree.fromstring('<annotation title="x" body="y" problem="0" highlight="yellow">z</annotation>')
self.annotatable._render_annotation(0, actual_el)
self.assertEqual(expected_el.tag, actual_el.tag)
self.assertEqual(expected_el.text, actual_el.text)
self.assertDictEqual(dict(expected_el.attrib), dict(actual_el.attrib))
def test_render_content(self):
content = self.annotatable._render_content()
el = etree.fromstring(content)
self.assertEqual('div', el.tag, 'root tag is a div')
expected_num_annotations = 5
actual_num_annotations = el.xpath('count(//span[contains(@class,"annotatable-span")])')
self.assertEqual(expected_num_annotations, actual_num_annotations, 'check number of annotations')
def test_get_html(self):
context = self.annotatable.get_html()
for key in ['display_name', 'element_id', 'content_html', 'instructions_html']:
self.assertIn(key, context)
def test_extract_instructions(self):
xmltree = etree.fromstring(self.sample_xml)
expected_xml = u"<div>Read the text.</div>"
actual_xml = self.annotatable._extract_instructions(xmltree)
self.assertIsNotNone(actual_xml)
self.assertEqual(expected_xml.strip(), actual_xml.strip())
xmltree = etree.fromstring('<annotatable>foo</annotatable>')
actual = self.annotatable._extract_instructions(xmltree)
self.assertIsNone(actual)
| agpl-3.0 |
agconti/njode | env/lib/python2.7/site-packages/pygments/unistring.py | 29 | 51110 | # -*- coding: utf-8 -*-
"""
pygments.unistring
~~~~~~~~~~~~~~~~~~
Strings of all Unicode characters of a certain category.
Used for matching in Unicode-aware languages. Run to regenerate.
Inspired by chartypes_create.py from the MoinMoin project.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
Cc = u'\x00-\x1f\x7f-\x9f'
Cf = u'\xad\u0600-\u0604\u061c\u06dd\u070f\u180e\u200b-\u200f\u202a-\u202e\u2060-\u2064\u2066-\u206f\ufeff\ufff9-\ufffb'
Cn = u'\u0378-\u0379\u037f-\u0383\u038b\u038d\u03a2\u0528-\u0530\u0557-\u0558\u0560\u0588\u058b-\u058e\u0590\u05c8-\u05cf\u05eb-\u05ef\u05f5-\u05ff\u0605\u061d\u070e\u074b-\u074c\u07b2-\u07bf\u07fb-\u07ff\u082e-\u082f\u083f\u085c-\u085d\u085f-\u089f\u08a1\u08ad-\u08e3\u08ff\u0978\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09c5-\u09c6\u09c9-\u09ca\u09cf-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09fc-\u0a00\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a50\u0a52-\u0a58\u0a5d\u0a5f-\u0a65\u0a76-\u0a80\u0a84\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0acf\u0ad1-\u0adf\u0ae4-\u0ae5\u0af2-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34\u0b3a-\u0b3b\u0b45-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b64-\u0b65\u0b78-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bcf\u0bd1-\u0bd6\u0bd8-\u0be5\u0bfb-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3c\u0c45\u0c49\u0c4e-\u0c54\u0c57\u0c5a-\u0c5f\u0c64-\u0c65\u0c70-\u0c77\u0c80-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbb\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce4-\u0ce5\u0cf0\u0cf3-\u0d01\u0d04\u0d0d\u0d11\u0d3b-\u0d3c\u0d45\u0d49\u0d4f-\u0d56\u0d58-\u0d5f\u0d64-\u0d65\u0d76-\u0d78\u0d80-\u0d81\u0d84\u0d97-\u0d99\u0db2\u0dbc\u0dbe-\u0dbf\u0dc7-\u0dc9\u0dcb-\u0dce\u0dd5\u0dd7\u0de0-\u0df1\u0df5-\u0e00\u0e3b-\u0e3e\u0e5c-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0edb\u0ee0-\u0eff\u0f48\u0f6d-\u0f70\u0f98\u0fbd\u0fcd\u0fdb-\u0fff\u10c6\u10c8-\u10cc\u10ce-\u10cf\u1249\u124e-\u124f\u1257\u1259\u125e-\u125f\u1289\u128e-\u128f\u12b1\u12b6-\u12b7\u12bf\u12c1\u12c6-\u12c7\u12d7\u1311\u1316-\u1317\u135b-\u135c\u137d-\u137f\u139a-\u139f\u13f5-\u13ff\u169d-\u169f\u16f1-\u16ff\u170d\u1715-\u171f\u1737-\u173f\u1754-\u175f\u176d\u1771\u1774-\u177f\u17de-\u17df\u17ea-\u17ef\u17fa-\u17ff\u180f\u181a-\u181f\u1878-\u187f\u18ab-\u18af\u18f6-\u18ff\u191d-\u191f\u192c-\u192f\u193c-\u193f\u1941-\u1943\u196e-\u196f\u1975-\u197f\u19ac-\u19af\u19ca-\u19cf\u19db-\u19dd\u1a1c-\u1a1d\u1a5f\u1a7d-\u1a7e\u1a8a-\u1a8f\u1a9a-\u1a9f\u1aae-\u1aff\u1b4c-\u1b4f\u1b7d-\u1b7f\u1bf4-\u1bfb\u1c38-\u1c3a\u1c4a-\u1c4c\u1c80-\u1cbf\u1cc8-\u1ccf\u1cf7-\u1cff\u1de7-\u1dfb\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fc5\u1fd4-\u1fd5\u1fdc\u1ff0-\u1ff1\u1ff5\u1fff\u2065\u2072-\u2073\u208f\u209d-\u209f\u20bb-\u20cf\u20f1-\u20ff\u218a-\u218f\u23f4-\u23ff\u2427-\u243f\u244b-\u245f\u2700\u2b4d-\u2b4f\u2b5a-\u2bff\u2c2f\u2c5f\u2cf4-\u2cf8\u2d26\u2d28-\u2d2c\u2d2e-\u2d2f\u2d68-\u2d6e\u2d71-\u2d7e\u2d97-\u2d9f\u2da7\u2daf\u2db7\u2dbf\u2dc7\u2dcf\u2dd7\u2ddf\u2e3c-\u2e7f\u2e9a\u2ef4-\u2eff\u2fd6-\u2fef\u2ffc-\u2fff\u3040\u3097-\u3098\u3100-\u3104\u312e-\u3130\u318f\u31bb-\u31bf\u31e4-\u31ef\u321f\u32ff\u4db6-\u4dbf\u9fcd-\u9fff\ua48d-\ua48f\ua4c7-\ua4cf\ua62c-\ua63f\ua698-\ua69e\ua6f8-\ua6ff\ua78f\ua794-\ua79f\ua7ab-\ua7f7\ua82c-\ua82f\ua83a-\ua83f\ua878-\ua87f\ua8c5-\ua8cd\ua8da-\ua8df\ua8fc-\ua8ff\ua954-\ua95e\ua97d-\ua97f\ua9ce\ua9da-\ua9dd\ua9e0-\ua9ff\uaa37-\uaa3f\uaa4e-\uaa4f\uaa5a-\uaa5b\uaa7c-\uaa7f\uaac3-\uaada\uaaf7-\uab00\uab07-\uab08\uab0f-\uab10\uab17-\uab1f\uab27\uab2f-\uabbf\uabee-\uabef\uabfa-\uabff\ud7a4-\ud7af\ud7c7-\ud7ca\ud7fc-\ud7ff\ufa6e-\ufa6f\ufada-\ufaff\ufb07-\ufb12\ufb18-\ufb1c\ufb37\ufb3d\ufb3f\ufb42\ufb45\ufbc2-\ufbd2\ufd40-\ufd4f\ufd90-\ufd91\ufdc8-\ufdef\ufdfe-\ufdff\ufe1a-\ufe1f\ufe27-\ufe2f\ufe53\ufe67\ufe6c-\ufe6f\ufe75\ufefd-\ufefe\uff00\uffbf-\uffc1\uffc8-\uffc9\uffd0-\uffd1\uffd8-\uffd9\uffdd-\uffdf\uffe7\uffef-\ufff8\ufffe-\uffff'
Co = u'\ue000-\uf8ff'
try:
Cs = eval(r"u'\ud800-\udbff\\\udc00\udc01-\udfff'")
except UnicodeDecodeError:
Cs = '' # Jython can't handle isolated surrogates
Ll = u'a-z\xb5\xdf-\xf6\xf8-\xff\u0101\u0103\u0105\u0107\u0109\u010b\u010d\u010f\u0111\u0113\u0115\u0117\u0119\u011b\u011d\u011f\u0121\u0123\u0125\u0127\u0129\u012b\u012d\u012f\u0131\u0133\u0135\u0137-\u0138\u013a\u013c\u013e\u0140\u0142\u0144\u0146\u0148-\u0149\u014b\u014d\u014f\u0151\u0153\u0155\u0157\u0159\u015b\u015d\u015f\u0161\u0163\u0165\u0167\u0169\u016b\u016d\u016f\u0171\u0173\u0175\u0177\u017a\u017c\u017e-\u0180\u0183\u0185\u0188\u018c-\u018d\u0192\u0195\u0199-\u019b\u019e\u01a1\u01a3\u01a5\u01a8\u01aa-\u01ab\u01ad\u01b0\u01b4\u01b6\u01b9-\u01ba\u01bd-\u01bf\u01c6\u01c9\u01cc\u01ce\u01d0\u01d2\u01d4\u01d6\u01d8\u01da\u01dc-\u01dd\u01df\u01e1\u01e3\u01e5\u01e7\u01e9\u01eb\u01ed\u01ef-\u01f0\u01f3\u01f5\u01f9\u01fb\u01fd\u01ff\u0201\u0203\u0205\u0207\u0209\u020b\u020d\u020f\u0211\u0213\u0215\u0217\u0219\u021b\u021d\u021f\u0221\u0223\u0225\u0227\u0229\u022b\u022d\u022f\u0231\u0233-\u0239\u023c\u023f-\u0240\u0242\u0247\u0249\u024b\u024d\u024f-\u0293\u0295-\u02af\u0371\u0373\u0377\u037b-\u037d\u0390\u03ac-\u03ce\u03d0-\u03d1\u03d5-\u03d7\u03d9\u03db\u03dd\u03df\u03e1\u03e3\u03e5\u03e7\u03e9\u03eb\u03ed\u03ef-\u03f3\u03f5\u03f8\u03fb-\u03fc\u0430-\u045f\u0461\u0463\u0465\u0467\u0469\u046b\u046d\u046f\u0471\u0473\u0475\u0477\u0479\u047b\u047d\u047f\u0481\u048b\u048d\u048f\u0491\u0493\u0495\u0497\u0499\u049b\u049d\u049f\u04a1\u04a3\u04a5\u04a7\u04a9\u04ab\u04ad\u04af\u04b1\u04b3\u04b5\u04b7\u04b9\u04bb\u04bd\u04bf\u04c2\u04c4\u04c6\u04c8\u04ca\u04cc\u04ce-\u04cf\u04d1\u04d3\u04d5\u04d7\u04d9\u04db\u04dd\u04df\u04e1\u04e3\u04e5\u04e7\u04e9\u04eb\u04ed\u04ef\u04f1\u04f3\u04f5\u04f7\u04f9\u04fb\u04fd\u04ff\u0501\u0503\u0505\u0507\u0509\u050b\u050d\u050f\u0511\u0513\u0515\u0517\u0519\u051b\u051d\u051f\u0521\u0523\u0525\u0527\u0561-\u0587\u1d00-\u1d2b\u1d6b-\u1d77\u1d79-\u1d9a\u1e01\u1e03\u1e05\u1e07\u1e09\u1e0b\u1e0d\u1e0f\u1e11\u1e13\u1e15\u1e17\u1e19\u1e1b\u1e1d\u1e1f\u1e21\u1e23\u1e25\u1e27\u1e29\u1e2b\u1e2d\u1e2f\u1e31\u1e33\u1e35\u1e37\u1e39\u1e3b\u1e3d\u1e3f\u1e41\u1e43\u1e45\u1e47\u1e49\u1e4b\u1e4d\u1e4f\u1e51\u1e53\u1e55\u1e57\u1e59\u1e5b\u1e5d\u1e5f\u1e61\u1e63\u1e65\u1e67\u1e69\u1e6b\u1e6d\u1e6f\u1e71\u1e73\u1e75\u1e77\u1e79\u1e7b\u1e7d\u1e7f\u1e81\u1e83\u1e85\u1e87\u1e89\u1e8b\u1e8d\u1e8f\u1e91\u1e93\u1e95-\u1e9d\u1e9f\u1ea1\u1ea3\u1ea5\u1ea7\u1ea9\u1eab\u1ead\u1eaf\u1eb1\u1eb3\u1eb5\u1eb7\u1eb9\u1ebb\u1ebd\u1ebf\u1ec1\u1ec3\u1ec5\u1ec7\u1ec9\u1ecb\u1ecd\u1ecf\u1ed1\u1ed3\u1ed5\u1ed7\u1ed9\u1edb\u1edd\u1edf\u1ee1\u1ee3\u1ee5\u1ee7\u1ee9\u1eeb\u1eed\u1eef\u1ef1\u1ef3\u1ef5\u1ef7\u1ef9\u1efb\u1efd\u1eff-\u1f07\u1f10-\u1f15\u1f20-\u1f27\u1f30-\u1f37\u1f40-\u1f45\u1f50-\u1f57\u1f60-\u1f67\u1f70-\u1f7d\u1f80-\u1f87\u1f90-\u1f97\u1fa0-\u1fa7\u1fb0-\u1fb4\u1fb6-\u1fb7\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fc7\u1fd0-\u1fd3\u1fd6-\u1fd7\u1fe0-\u1fe7\u1ff2-\u1ff4\u1ff6-\u1ff7\u210a\u210e-\u210f\u2113\u212f\u2134\u2139\u213c-\u213d\u2146-\u2149\u214e\u2184\u2c30-\u2c5e\u2c61\u2c65-\u2c66\u2c68\u2c6a\u2c6c\u2c71\u2c73-\u2c74\u2c76-\u2c7b\u2c81\u2c83\u2c85\u2c87\u2c89\u2c8b\u2c8d\u2c8f\u2c91\u2c93\u2c95\u2c97\u2c99\u2c9b\u2c9d\u2c9f\u2ca1\u2ca3\u2ca5\u2ca7\u2ca9\u2cab\u2cad\u2caf\u2cb1\u2cb3\u2cb5\u2cb7\u2cb9\u2cbb\u2cbd\u2cbf\u2cc1\u2cc3\u2cc5\u2cc7\u2cc9\u2ccb\u2ccd\u2ccf\u2cd1\u2cd3\u2cd5\u2cd7\u2cd9\u2cdb\u2cdd\u2cdf\u2ce1\u2ce3-\u2ce4\u2cec\u2cee\u2cf3\u2d00-\u2d25\u2d27\u2d2d\ua641\ua643\ua645\ua647\ua649\ua64b\ua64d\ua64f\ua651\ua653\ua655\ua657\ua659\ua65b\ua65d\ua65f\ua661\ua663\ua665\ua667\ua669\ua66b\ua66d\ua681\ua683\ua685\ua687\ua689\ua68b\ua68d\ua68f\ua691\ua693\ua695\ua697\ua723\ua725\ua727\ua729\ua72b\ua72d\ua72f-\ua731\ua733\ua735\ua737\ua739\ua73b\ua73d\ua73f\ua741\ua743\ua745\ua747\ua749\ua74b\ua74d\ua74f\ua751\ua753\ua755\ua757\ua759\ua75b\ua75d\ua75f\ua761\ua763\ua765\ua767\ua769\ua76b\ua76d\ua76f\ua771-\ua778\ua77a\ua77c\ua77f\ua781\ua783\ua785\ua787\ua78c\ua78e\ua791\ua793\ua7a1\ua7a3\ua7a5\ua7a7\ua7a9\ua7fa\ufb00-\ufb06\ufb13-\ufb17\uff41-\uff5a'
Lm = u'\u02b0-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0374\u037a\u0559\u0640\u06e5-\u06e6\u07f4-\u07f5\u07fa\u081a\u0824\u0828\u0971\u0e46\u0ec6\u10fc\u17d7\u1843\u1aa7\u1c78-\u1c7d\u1d2c-\u1d6a\u1d78\u1d9b-\u1dbf\u2071\u207f\u2090-\u209c\u2c7c-\u2c7d\u2d6f\u2e2f\u3005\u3031-\u3035\u303b\u309d-\u309e\u30fc-\u30fe\ua015\ua4f8-\ua4fd\ua60c\ua67f\ua717-\ua71f\ua770\ua788\ua7f8-\ua7f9\ua9cf\uaa70\uaadd\uaaf3-\uaaf4\uff70\uff9e-\uff9f'
Lo = u'\xaa\xba\u01bb\u01c0-\u01c3\u0294\u05d0-\u05ea\u05f0-\u05f2\u0620-\u063f\u0641-\u064a\u066e-\u066f\u0671-\u06d3\u06d5\u06ee-\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u0800-\u0815\u0840-\u0858\u08a0\u08a2-\u08ac\u0904-\u0939\u093d\u0950\u0958-\u0961\u0972-\u0977\u0979-\u097f\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc-\u09dd\u09df-\u09e1\u09f0-\u09f1\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0-\u0ae1\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3d\u0b5c-\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c33\u0c35-\u0c39\u0c3d\u0c58-\u0c59\u0c60-\u0c61\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0-\u0ce1\u0cf1-\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d60-\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32-\u0e33\u0e40-\u0e45\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb0\u0eb2-\u0eb3\u0ebd\u0ec0-\u0ec4\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070\u1075-\u1081\u108e\u10d0-\u10fa\u10fd-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f4\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17dc\u1820-\u1842\u1844-\u1877\u1880-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191c\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19c1-\u19c7\u1a00-\u1a16\u1a20-\u1a54\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae-\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c77\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5-\u1cf6\u2135-\u2138\u2d30-\u2d67\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u3006\u303c\u3041-\u3096\u309f\u30a1-\u30fa\u30ff\u3105-\u312d\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fcc\ua000-\ua014\ua016-\ua48c\ua4d0-\ua4f7\ua500-\ua60b\ua610-\ua61f\ua62a-\ua62b\ua66e\ua6a0-\ua6e5\ua7fb-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa6f\uaa71-\uaa76\uaa7a\uaa80-\uaaaf\uaab1\uaab5-\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadc\uaae0-\uaaea\uaaf2\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff66-\uff6f\uff71-\uff9d\uffa0-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc'
Lt = u'\u01c5\u01c8\u01cb\u01f2\u1f88-\u1f8f\u1f98-\u1f9f\u1fa8-\u1faf\u1fbc\u1fcc\u1ffc'
Lu = u'A-Z\xc0-\xd6\xd8-\xde\u0100\u0102\u0104\u0106\u0108\u010a\u010c\u010e\u0110\u0112\u0114\u0116\u0118\u011a\u011c\u011e\u0120\u0122\u0124\u0126\u0128\u012a\u012c\u012e\u0130\u0132\u0134\u0136\u0139\u013b\u013d\u013f\u0141\u0143\u0145\u0147\u014a\u014c\u014e\u0150\u0152\u0154\u0156\u0158\u015a\u015c\u015e\u0160\u0162\u0164\u0166\u0168\u016a\u016c\u016e\u0170\u0172\u0174\u0176\u0178-\u0179\u017b\u017d\u0181-\u0182\u0184\u0186-\u0187\u0189-\u018b\u018e-\u0191\u0193-\u0194\u0196-\u0198\u019c-\u019d\u019f-\u01a0\u01a2\u01a4\u01a6-\u01a7\u01a9\u01ac\u01ae-\u01af\u01b1-\u01b3\u01b5\u01b7-\u01b8\u01bc\u01c4\u01c7\u01ca\u01cd\u01cf\u01d1\u01d3\u01d5\u01d7\u01d9\u01db\u01de\u01e0\u01e2\u01e4\u01e6\u01e8\u01ea\u01ec\u01ee\u01f1\u01f4\u01f6-\u01f8\u01fa\u01fc\u01fe\u0200\u0202\u0204\u0206\u0208\u020a\u020c\u020e\u0210\u0212\u0214\u0216\u0218\u021a\u021c\u021e\u0220\u0222\u0224\u0226\u0228\u022a\u022c\u022e\u0230\u0232\u023a-\u023b\u023d-\u023e\u0241\u0243-\u0246\u0248\u024a\u024c\u024e\u0370\u0372\u0376\u0386\u0388-\u038a\u038c\u038e-\u038f\u0391-\u03a1\u03a3-\u03ab\u03cf\u03d2-\u03d4\u03d8\u03da\u03dc\u03de\u03e0\u03e2\u03e4\u03e6\u03e8\u03ea\u03ec\u03ee\u03f4\u03f7\u03f9-\u03fa\u03fd-\u042f\u0460\u0462\u0464\u0466\u0468\u046a\u046c\u046e\u0470\u0472\u0474\u0476\u0478\u047a\u047c\u047e\u0480\u048a\u048c\u048e\u0490\u0492\u0494\u0496\u0498\u049a\u049c\u049e\u04a0\u04a2\u04a4\u04a6\u04a8\u04aa\u04ac\u04ae\u04b0\u04b2\u04b4\u04b6\u04b8\u04ba\u04bc\u04be\u04c0-\u04c1\u04c3\u04c5\u04c7\u04c9\u04cb\u04cd\u04d0\u04d2\u04d4\u04d6\u04d8\u04da\u04dc\u04de\u04e0\u04e2\u04e4\u04e6\u04e8\u04ea\u04ec\u04ee\u04f0\u04f2\u04f4\u04f6\u04f8\u04fa\u04fc\u04fe\u0500\u0502\u0504\u0506\u0508\u050a\u050c\u050e\u0510\u0512\u0514\u0516\u0518\u051a\u051c\u051e\u0520\u0522\u0524\u0526\u0531-\u0556\u10a0-\u10c5\u10c7\u10cd\u1e00\u1e02\u1e04\u1e06\u1e08\u1e0a\u1e0c\u1e0e\u1e10\u1e12\u1e14\u1e16\u1e18\u1e1a\u1e1c\u1e1e\u1e20\u1e22\u1e24\u1e26\u1e28\u1e2a\u1e2c\u1e2e\u1e30\u1e32\u1e34\u1e36\u1e38\u1e3a\u1e3c\u1e3e\u1e40\u1e42\u1e44\u1e46\u1e48\u1e4a\u1e4c\u1e4e\u1e50\u1e52\u1e54\u1e56\u1e58\u1e5a\u1e5c\u1e5e\u1e60\u1e62\u1e64\u1e66\u1e68\u1e6a\u1e6c\u1e6e\u1e70\u1e72\u1e74\u1e76\u1e78\u1e7a\u1e7c\u1e7e\u1e80\u1e82\u1e84\u1e86\u1e88\u1e8a\u1e8c\u1e8e\u1e90\u1e92\u1e94\u1e9e\u1ea0\u1ea2\u1ea4\u1ea6\u1ea8\u1eaa\u1eac\u1eae\u1eb0\u1eb2\u1eb4\u1eb6\u1eb8\u1eba\u1ebc\u1ebe\u1ec0\u1ec2\u1ec4\u1ec6\u1ec8\u1eca\u1ecc\u1ece\u1ed0\u1ed2\u1ed4\u1ed6\u1ed8\u1eda\u1edc\u1ede\u1ee0\u1ee2\u1ee4\u1ee6\u1ee8\u1eea\u1eec\u1eee\u1ef0\u1ef2\u1ef4\u1ef6\u1ef8\u1efa\u1efc\u1efe\u1f08-\u1f0f\u1f18-\u1f1d\u1f28-\u1f2f\u1f38-\u1f3f\u1f48-\u1f4d\u1f59\u1f5b\u1f5d\u1f5f\u1f68-\u1f6f\u1fb8-\u1fbb\u1fc8-\u1fcb\u1fd8-\u1fdb\u1fe8-\u1fec\u1ff8-\u1ffb\u2102\u2107\u210b-\u210d\u2110-\u2112\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u2130-\u2133\u213e-\u213f\u2145\u2183\u2c00-\u2c2e\u2c60\u2c62-\u2c64\u2c67\u2c69\u2c6b\u2c6d-\u2c70\u2c72\u2c75\u2c7e-\u2c80\u2c82\u2c84\u2c86\u2c88\u2c8a\u2c8c\u2c8e\u2c90\u2c92\u2c94\u2c96\u2c98\u2c9a\u2c9c\u2c9e\u2ca0\u2ca2\u2ca4\u2ca6\u2ca8\u2caa\u2cac\u2cae\u2cb0\u2cb2\u2cb4\u2cb6\u2cb8\u2cba\u2cbc\u2cbe\u2cc0\u2cc2\u2cc4\u2cc6\u2cc8\u2cca\u2ccc\u2cce\u2cd0\u2cd2\u2cd4\u2cd6\u2cd8\u2cda\u2cdc\u2cde\u2ce0\u2ce2\u2ceb\u2ced\u2cf2\ua640\ua642\ua644\ua646\ua648\ua64a\ua64c\ua64e\ua650\ua652\ua654\ua656\ua658\ua65a\ua65c\ua65e\ua660\ua662\ua664\ua666\ua668\ua66a\ua66c\ua680\ua682\ua684\ua686\ua688\ua68a\ua68c\ua68e\ua690\ua692\ua694\ua696\ua722\ua724\ua726\ua728\ua72a\ua72c\ua72e\ua732\ua734\ua736\ua738\ua73a\ua73c\ua73e\ua740\ua742\ua744\ua746\ua748\ua74a\ua74c\ua74e\ua750\ua752\ua754\ua756\ua758\ua75a\ua75c\ua75e\ua760\ua762\ua764\ua766\ua768\ua76a\ua76c\ua76e\ua779\ua77b\ua77d-\ua77e\ua780\ua782\ua784\ua786\ua78b\ua78d\ua790\ua792\ua7a0\ua7a2\ua7a4\ua7a6\ua7a8\ua7aa\uff21-\uff3a'
Mc = u'\u0903\u093b\u093e-\u0940\u0949-\u094c\u094e-\u094f\u0982-\u0983\u09be-\u09c0\u09c7-\u09c8\u09cb-\u09cc\u09d7\u0a03\u0a3e-\u0a40\u0a83\u0abe-\u0ac0\u0ac9\u0acb-\u0acc\u0b02-\u0b03\u0b3e\u0b40\u0b47-\u0b48\u0b4b-\u0b4c\u0b57\u0bbe-\u0bbf\u0bc1-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcc\u0bd7\u0c01-\u0c03\u0c41-\u0c44\u0c82-\u0c83\u0cbe\u0cc0-\u0cc4\u0cc7-\u0cc8\u0cca-\u0ccb\u0cd5-\u0cd6\u0d02-\u0d03\u0d3e-\u0d40\u0d46-\u0d48\u0d4a-\u0d4c\u0d57\u0d82-\u0d83\u0dcf-\u0dd1\u0dd8-\u0ddf\u0df2-\u0df3\u0f3e-\u0f3f\u0f7f\u102b-\u102c\u1031\u1038\u103b-\u103c\u1056-\u1057\u1062-\u1064\u1067-\u106d\u1083-\u1084\u1087-\u108c\u108f\u109a-\u109c\u17b6\u17be-\u17c5\u17c7-\u17c8\u1923-\u1926\u1929-\u192b\u1930-\u1931\u1933-\u1938\u19b0-\u19c0\u19c8-\u19c9\u1a19-\u1a1a\u1a55\u1a57\u1a61\u1a63-\u1a64\u1a6d-\u1a72\u1b04\u1b35\u1b3b\u1b3d-\u1b41\u1b43-\u1b44\u1b82\u1ba1\u1ba6-\u1ba7\u1baa\u1bac-\u1bad\u1be7\u1bea-\u1bec\u1bee\u1bf2-\u1bf3\u1c24-\u1c2b\u1c34-\u1c35\u1ce1\u1cf2-\u1cf3\u302e-\u302f\ua823-\ua824\ua827\ua880-\ua881\ua8b4-\ua8c3\ua952-\ua953\ua983\ua9b4-\ua9b5\ua9ba-\ua9bb\ua9bd-\ua9c0\uaa2f-\uaa30\uaa33-\uaa34\uaa4d\uaa7b\uaaeb\uaaee-\uaaef\uaaf5\uabe3-\uabe4\uabe6-\uabe7\uabe9-\uabea\uabec'
Me = u'\u0488-\u0489\u20dd-\u20e0\u20e2-\u20e4\ua670-\ua672'
Mn = u'\u0300-\u036f\u0483-\u0487\u0591-\u05bd\u05bf\u05c1-\u05c2\u05c4-\u05c5\u05c7\u0610-\u061a\u064b-\u065f\u0670\u06d6-\u06dc\u06df-\u06e4\u06e7-\u06e8\u06ea-\u06ed\u0711\u0730-\u074a\u07a6-\u07b0\u07eb-\u07f3\u0816-\u0819\u081b-\u0823\u0825-\u0827\u0829-\u082d\u0859-\u085b\u08e4-\u08fe\u0900-\u0902\u093a\u093c\u0941-\u0948\u094d\u0951-\u0957\u0962-\u0963\u0981\u09bc\u09c1-\u09c4\u09cd\u09e2-\u09e3\u0a01-\u0a02\u0a3c\u0a41-\u0a42\u0a47-\u0a48\u0a4b-\u0a4d\u0a51\u0a70-\u0a71\u0a75\u0a81-\u0a82\u0abc\u0ac1-\u0ac5\u0ac7-\u0ac8\u0acd\u0ae2-\u0ae3\u0b01\u0b3c\u0b3f\u0b41-\u0b44\u0b4d\u0b56\u0b62-\u0b63\u0b82\u0bc0\u0bcd\u0c3e-\u0c40\u0c46-\u0c48\u0c4a-\u0c4d\u0c55-\u0c56\u0c62-\u0c63\u0cbc\u0cbf\u0cc6\u0ccc-\u0ccd\u0ce2-\u0ce3\u0d41-\u0d44\u0d4d\u0d62-\u0d63\u0dca\u0dd2-\u0dd4\u0dd6\u0e31\u0e34-\u0e3a\u0e47-\u0e4e\u0eb1\u0eb4-\u0eb9\u0ebb-\u0ebc\u0ec8-\u0ecd\u0f18-\u0f19\u0f35\u0f37\u0f39\u0f71-\u0f7e\u0f80-\u0f84\u0f86-\u0f87\u0f8d-\u0f97\u0f99-\u0fbc\u0fc6\u102d-\u1030\u1032-\u1037\u1039-\u103a\u103d-\u103e\u1058-\u1059\u105e-\u1060\u1071-\u1074\u1082\u1085-\u1086\u108d\u109d\u135d-\u135f\u1712-\u1714\u1732-\u1734\u1752-\u1753\u1772-\u1773\u17b4-\u17b5\u17b7-\u17bd\u17c6\u17c9-\u17d3\u17dd\u180b-\u180d\u18a9\u1920-\u1922\u1927-\u1928\u1932\u1939-\u193b\u1a17-\u1a18\u1a1b\u1a56\u1a58-\u1a5e\u1a60\u1a62\u1a65-\u1a6c\u1a73-\u1a7c\u1a7f\u1b00-\u1b03\u1b34\u1b36-\u1b3a\u1b3c\u1b42\u1b6b-\u1b73\u1b80-\u1b81\u1ba2-\u1ba5\u1ba8-\u1ba9\u1bab\u1be6\u1be8-\u1be9\u1bed\u1bef-\u1bf1\u1c2c-\u1c33\u1c36-\u1c37\u1cd0-\u1cd2\u1cd4-\u1ce0\u1ce2-\u1ce8\u1ced\u1cf4\u1dc0-\u1de6\u1dfc-\u1dff\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2cef-\u2cf1\u2d7f\u2de0-\u2dff\u302a-\u302d\u3099-\u309a\ua66f\ua674-\ua67d\ua69f\ua6f0-\ua6f1\ua802\ua806\ua80b\ua825-\ua826\ua8c4\ua8e0-\ua8f1\ua926-\ua92d\ua947-\ua951\ua980-\ua982\ua9b3\ua9b6-\ua9b9\ua9bc\uaa29-\uaa2e\uaa31-\uaa32\uaa35-\uaa36\uaa43\uaa4c\uaab0\uaab2-\uaab4\uaab7-\uaab8\uaabe-\uaabf\uaac1\uaaec-\uaaed\uaaf6\uabe5\uabe8\uabed\ufb1e\ufe00-\ufe0f\ufe20-\ufe26'
Nd = u'0-9\u0660-\u0669\u06f0-\u06f9\u07c0-\u07c9\u0966-\u096f\u09e6-\u09ef\u0a66-\u0a6f\u0ae6-\u0aef\u0b66-\u0b6f\u0be6-\u0bef\u0c66-\u0c6f\u0ce6-\u0cef\u0d66-\u0d6f\u0e50-\u0e59\u0ed0-\u0ed9\u0f20-\u0f29\u1040-\u1049\u1090-\u1099\u17e0-\u17e9\u1810-\u1819\u1946-\u194f\u19d0-\u19d9\u1a80-\u1a89\u1a90-\u1a99\u1b50-\u1b59\u1bb0-\u1bb9\u1c40-\u1c49\u1c50-\u1c59\ua620-\ua629\ua8d0-\ua8d9\ua900-\ua909\ua9d0-\ua9d9\uaa50-\uaa59\uabf0-\uabf9\uff10-\uff19'
Nl = u'\u16ee-\u16f0\u2160-\u2182\u2185-\u2188\u3007\u3021-\u3029\u3038-\u303a\ua6e6-\ua6ef'
No = u'\xb2-\xb3\xb9\xbc-\xbe\u09f4-\u09f9\u0b72-\u0b77\u0bf0-\u0bf2\u0c78-\u0c7e\u0d70-\u0d75\u0f2a-\u0f33\u1369-\u137c\u17f0-\u17f9\u19da\u2070\u2074-\u2079\u2080-\u2089\u2150-\u215f\u2189\u2460-\u249b\u24ea-\u24ff\u2776-\u2793\u2cfd\u3192-\u3195\u3220-\u3229\u3248-\u324f\u3251-\u325f\u3280-\u3289\u32b1-\u32bf\ua830-\ua835'
Pc = u'_\u203f-\u2040\u2054\ufe33-\ufe34\ufe4d-\ufe4f\uff3f'
Pd = u'\\-\u058a\u05be\u1400\u1806\u2010-\u2015\u2e17\u2e1a\u2e3a-\u2e3b\u301c\u3030\u30a0\ufe31-\ufe32\ufe58\ufe63\uff0d'
Pe = u')\\]}\u0f3b\u0f3d\u169c\u2046\u207e\u208e\u2309\u230b\u232a\u2769\u276b\u276d\u276f\u2771\u2773\u2775\u27c6\u27e7\u27e9\u27eb\u27ed\u27ef\u2984\u2986\u2988\u298a\u298c\u298e\u2990\u2992\u2994\u2996\u2998\u29d9\u29db\u29fd\u2e23\u2e25\u2e27\u2e29\u3009\u300b\u300d\u300f\u3011\u3015\u3017\u3019\u301b\u301e-\u301f\ufd3f\ufe18\ufe36\ufe38\ufe3a\ufe3c\ufe3e\ufe40\ufe42\ufe44\ufe48\ufe5a\ufe5c\ufe5e\uff09\uff3d\uff5d\uff60\uff63'
Pf = u'\xbb\u2019\u201d\u203a\u2e03\u2e05\u2e0a\u2e0d\u2e1d\u2e21'
Pi = u'\xab\u2018\u201b-\u201c\u201f\u2039\u2e02\u2e04\u2e09\u2e0c\u2e1c\u2e20'
Po = u"!-#%-'*,.-/:-;?-@\\\\\xa1\xa7\xb6-\xb7\xbf\u037e\u0387\u055a-\u055f\u0589\u05c0\u05c3\u05c6\u05f3-\u05f4\u0609-\u060a\u060c-\u060d\u061b\u061e-\u061f\u066a-\u066d\u06d4\u0700-\u070d\u07f7-\u07f9\u0830-\u083e\u085e\u0964-\u0965\u0970\u0af0\u0df4\u0e4f\u0e5a-\u0e5b\u0f04-\u0f12\u0f14\u0f85\u0fd0-\u0fd4\u0fd9-\u0fda\u104a-\u104f\u10fb\u1360-\u1368\u166d-\u166e\u16eb-\u16ed\u1735-\u1736\u17d4-\u17d6\u17d8-\u17da\u1800-\u1805\u1807-\u180a\u1944-\u1945\u1a1e-\u1a1f\u1aa0-\u1aa6\u1aa8-\u1aad\u1b5a-\u1b60\u1bfc-\u1bff\u1c3b-\u1c3f\u1c7e-\u1c7f\u1cc0-\u1cc7\u1cd3\u2016-\u2017\u2020-\u2027\u2030-\u2038\u203b-\u203e\u2041-\u2043\u2047-\u2051\u2053\u2055-\u205e\u2cf9-\u2cfc\u2cfe-\u2cff\u2d70\u2e00-\u2e01\u2e06-\u2e08\u2e0b\u2e0e-\u2e16\u2e18-\u2e19\u2e1b\u2e1e-\u2e1f\u2e2a-\u2e2e\u2e30-\u2e39\u3001-\u3003\u303d\u30fb\ua4fe-\ua4ff\ua60d-\ua60f\ua673\ua67e\ua6f2-\ua6f7\ua874-\ua877\ua8ce-\ua8cf\ua8f8-\ua8fa\ua92e-\ua92f\ua95f\ua9c1-\ua9cd\ua9de-\ua9df\uaa5c-\uaa5f\uaade-\uaadf\uaaf0-\uaaf1\uabeb\ufe10-\ufe16\ufe19\ufe30\ufe45-\ufe46\ufe49-\ufe4c\ufe50-\ufe52\ufe54-\ufe57\ufe5f-\ufe61\ufe68\ufe6a-\ufe6b\uff01-\uff03\uff05-\uff07\uff0a\uff0c\uff0e-\uff0f\uff1a-\uff1b\uff1f-\uff20\uff3c\uff61\uff64-\uff65"
Ps = u'(\\[{\u0f3a\u0f3c\u169b\u201a\u201e\u2045\u207d\u208d\u2308\u230a\u2329\u2768\u276a\u276c\u276e\u2770\u2772\u2774\u27c5\u27e6\u27e8\u27ea\u27ec\u27ee\u2983\u2985\u2987\u2989\u298b\u298d\u298f\u2991\u2993\u2995\u2997\u29d8\u29da\u29fc\u2e22\u2e24\u2e26\u2e28\u3008\u300a\u300c\u300e\u3010\u3014\u3016\u3018\u301a\u301d\ufd3e\ufe17\ufe35\ufe37\ufe39\ufe3b\ufe3d\ufe3f\ufe41\ufe43\ufe47\ufe59\ufe5b\ufe5d\uff08\uff3b\uff5b\uff5f\uff62'
Sc = u'$\xa2-\xa5\u058f\u060b\u09f2-\u09f3\u09fb\u0af1\u0bf9\u0e3f\u17db\u20a0-\u20ba\ua838\ufdfc\ufe69\uff04\uffe0-\uffe1\uffe5-\uffe6'
Sk = u'\\^`\xa8\xaf\xb4\xb8\u02c2-\u02c5\u02d2-\u02df\u02e5-\u02eb\u02ed\u02ef-\u02ff\u0375\u0384-\u0385\u1fbd\u1fbf-\u1fc1\u1fcd-\u1fcf\u1fdd-\u1fdf\u1fed-\u1fef\u1ffd-\u1ffe\u309b-\u309c\ua700-\ua716\ua720-\ua721\ua789-\ua78a\ufbb2-\ufbc1\uff3e\uff40\uffe3'
Sm = u'+<->|~\xac\xb1\xd7\xf7\u03f6\u0606-\u0608\u2044\u2052\u207a-\u207c\u208a-\u208c\u2118\u2140-\u2144\u214b\u2190-\u2194\u219a-\u219b\u21a0\u21a3\u21a6\u21ae\u21ce-\u21cf\u21d2\u21d4\u21f4-\u22ff\u2320-\u2321\u237c\u239b-\u23b3\u23dc-\u23e1\u25b7\u25c1\u25f8-\u25ff\u266f\u27c0-\u27c4\u27c7-\u27e5\u27f0-\u27ff\u2900-\u2982\u2999-\u29d7\u29dc-\u29fb\u29fe-\u2aff\u2b30-\u2b44\u2b47-\u2b4c\ufb29\ufe62\ufe64-\ufe66\uff0b\uff1c-\uff1e\uff5c\uff5e\uffe2\uffe9-\uffec'
So = u'\xa6\xa9\xae\xb0\u0482\u060e-\u060f\u06de\u06e9\u06fd-\u06fe\u07f6\u09fa\u0b70\u0bf3-\u0bf8\u0bfa\u0c7f\u0d79\u0f01-\u0f03\u0f13\u0f15-\u0f17\u0f1a-\u0f1f\u0f34\u0f36\u0f38\u0fbe-\u0fc5\u0fc7-\u0fcc\u0fce-\u0fcf\u0fd5-\u0fd8\u109e-\u109f\u1390-\u1399\u1940\u19de-\u19ff\u1b61-\u1b6a\u1b74-\u1b7c\u2100-\u2101\u2103-\u2106\u2108-\u2109\u2114\u2116-\u2117\u211e-\u2123\u2125\u2127\u2129\u212e\u213a-\u213b\u214a\u214c-\u214d\u214f\u2195-\u2199\u219c-\u219f\u21a1-\u21a2\u21a4-\u21a5\u21a7-\u21ad\u21af-\u21cd\u21d0-\u21d1\u21d3\u21d5-\u21f3\u2300-\u2307\u230c-\u231f\u2322-\u2328\u232b-\u237b\u237d-\u239a\u23b4-\u23db\u23e2-\u23f3\u2400-\u2426\u2440-\u244a\u249c-\u24e9\u2500-\u25b6\u25b8-\u25c0\u25c2-\u25f7\u2600-\u266e\u2670-\u26ff\u2701-\u2767\u2794-\u27bf\u2800-\u28ff\u2b00-\u2b2f\u2b45-\u2b46\u2b50-\u2b59\u2ce5-\u2cea\u2e80-\u2e99\u2e9b-\u2ef3\u2f00-\u2fd5\u2ff0-\u2ffb\u3004\u3012-\u3013\u3020\u3036-\u3037\u303e-\u303f\u3190-\u3191\u3196-\u319f\u31c0-\u31e3\u3200-\u321e\u322a-\u3247\u3250\u3260-\u327f\u328a-\u32b0\u32c0-\u32fe\u3300-\u33ff\u4dc0-\u4dff\ua490-\ua4c6\ua828-\ua82b\ua836-\ua837\ua839\uaa77-\uaa79\ufdfd\uffe4\uffe8\uffed-\uffee\ufffc-\ufffd'
Zl = u'\u2028'
Zp = u'\u2029'
Zs = u' \xa0\u1680\u2000-\u200a\u202f\u205f\u3000'
xid_continue = u'0-9A-Z_a-z\xaa\xb5\xb7\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0300-\u0374\u0376-\u0377\u037b-\u037d\u0386-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u0483-\u0487\u048a-\u0527\u0531-\u0556\u0559\u0561-\u0587\u0591-\u05bd\u05bf\u05c1-\u05c2\u05c4-\u05c5\u05c7\u05d0-\u05ea\u05f0-\u05f2\u0610-\u061a\u0620-\u0669\u066e-\u06d3\u06d5-\u06dc\u06df-\u06e8\u06ea-\u06fc\u06ff\u0710-\u074a\u074d-\u07b1\u07c0-\u07f5\u07fa\u0800-\u082d\u0840-\u085b\u08a0\u08a2-\u08ac\u08e4-\u08fe\u0900-\u0963\u0966-\u096f\u0971-\u0977\u0979-\u097f\u0981-\u0983\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bc-\u09c4\u09c7-\u09c8\u09cb-\u09ce\u09d7\u09dc-\u09dd\u09df-\u09e3\u09e6-\u09f1\u0a01-\u0a03\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a3c\u0a3e-\u0a42\u0a47-\u0a48\u0a4b-\u0a4d\u0a51\u0a59-\u0a5c\u0a5e\u0a66-\u0a75\u0a81-\u0a83\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abc-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ad0\u0ae0-\u0ae3\u0ae6-\u0aef\u0b01-\u0b03\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3c-\u0b44\u0b47-\u0b48\u0b4b-\u0b4d\u0b56-\u0b57\u0b5c-\u0b5d\u0b5f-\u0b63\u0b66-\u0b6f\u0b71\u0b82-\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd0\u0bd7\u0be6-\u0bef\u0c01-\u0c03\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c33\u0c35-\u0c39\u0c3d-\u0c44\u0c46-\u0c48\u0c4a-\u0c4d\u0c55-\u0c56\u0c58-\u0c59\u0c60-\u0c63\u0c66-\u0c6f\u0c82-\u0c83\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbc-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5-\u0cd6\u0cde\u0ce0-\u0ce3\u0ce6-\u0cef\u0cf1-\u0cf2\u0d02-\u0d03\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d-\u0d44\u0d46-\u0d48\u0d4a-\u0d4e\u0d57\u0d60-\u0d63\u0d66-\u0d6f\u0d7a-\u0d7f\u0d82-\u0d83\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0df2-\u0df3\u0e01-\u0e3a\u0e40-\u0e4e\u0e50-\u0e59\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb9\u0ebb-\u0ebd\u0ec0-\u0ec4\u0ec6\u0ec8-\u0ecd\u0ed0-\u0ed9\u0edc-\u0edf\u0f00\u0f18-\u0f19\u0f20-\u0f29\u0f35\u0f37\u0f39\u0f3e-\u0f47\u0f49-\u0f6c\u0f71-\u0f84\u0f86-\u0f97\u0f99-\u0fbc\u0fc6\u1000-\u1049\u1050-\u109d\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u135d-\u135f\u1369-\u1371\u1380-\u138f\u13a0-\u13f4\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f0\u1700-\u170c\u170e-\u1714\u1720-\u1734\u1740-\u1753\u1760-\u176c\u176e-\u1770\u1772-\u1773\u1780-\u17d3\u17d7\u17dc-\u17dd\u17e0-\u17e9\u180b-\u180d\u1810-\u1819\u1820-\u1877\u1880-\u18aa\u18b0-\u18f5\u1900-\u191c\u1920-\u192b\u1930-\u193b\u1946-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u19d0-\u19da\u1a00-\u1a1b\u1a20-\u1a5e\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1aa7\u1b00-\u1b4b\u1b50-\u1b59\u1b6b-\u1b73\u1b80-\u1bf3\u1c00-\u1c37\u1c40-\u1c49\u1c4d-\u1c7d\u1cd0-\u1cd2\u1cd4-\u1cf6\u1d00-\u1de6\u1dfc-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u203f-\u2040\u2054\u2071\u2
07f\u2090-\u209c\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2102\u2107\u210a-\u2113\u2115\u2118-\u211d\u2124\u2126\u2128\u212a-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d7f-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2de0-\u2dff\u3005-\u3007\u3021-\u302f\u3031-\u3035\u3038-\u303c\u3041-\u3096\u3099-\u309a\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312d\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fcc\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua62b\ua640-\ua66f\ua674-\ua67d\ua67f-\ua697\ua69f-\ua6f1\ua717-\ua71f\ua722-\ua788\ua78b-\ua78e\ua790-\ua793\ua7a0-\ua7aa\ua7f8-\ua827\ua840-\ua873\ua880-\ua8c4\ua8d0-\ua8d9\ua8e0-\ua8f7\ua8fb\ua900-\ua92d\ua930-\ua953\ua960-\ua97c\ua980-\ua9c0\ua9cf-\ua9d9\uaa00-\uaa36\uaa40-\uaa4d\uaa50-\uaa59\uaa60-\uaa76\uaa7a-\uaa7b\uaa80-\uaac2\uaadb-\uaadd\uaae0-\uaaef\uaaf2-\uaaf6\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabea\uabec-\uabed\uabf0-\uabf9\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufc5d\ufc64-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdf9\ufe00-\ufe0f\ufe20-\ufe26\ufe33-\ufe34\ufe4d-\ufe4f\ufe71\ufe73\ufe77\ufe79\ufe7b\ufe7d\ufe7f-\ufefc\uff10-\uff19\uff21-\uff3a\uff3f\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc'
xid_start = u'A-Z_a-z\xaa\xb5\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376-\u0377\u037b-\u037d\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u0527\u0531-\u0556\u0559\u0561-\u0587\u05d0-\u05ea\u05f0-\u05f2\u0620-\u064a\u066e-\u066f\u0671-\u06d3\u06d5\u06e5-\u06e6\u06ee-\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u07f4-\u07f5\u07fa\u0800-\u0815\u081a\u0824\u0828\u0840-\u0858\u08a0\u08a2-\u08ac\u0904-\u0939\u093d\u0950\u0958-\u0961\u0971-\u0977\u0979-\u097f\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc-\u09dd\u09df-\u09e1\u09f0-\u09f1\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0-\u0ae1\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3d\u0b5c-\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c33\u0c35-\u0c39\u0c3d\u0c58-\u0c59\u0c60-\u0c61\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0-\u0ce1\u0cf1-\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d60-\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32\u0e40-\u0e46\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb0\u0eb2\u0ebd\u0ec0-\u0ec4\u0ec6\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070\u1075-\u1081\u108e\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f4\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f0\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u1820-\u1877\u1880-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191c\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19c1-\u19c7\u1a00-\u1a16\u1a20-\u1a54\u1aa7\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae-\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c7d\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5-\u1cf6\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u2071\u207f\u2090-\u209c\u2102\u2107\u210a-\u2113\u2115\u2118-\u211d\u2124\u2126\u2128\u212a-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cee\u2cf2-\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303c\u3041-\u3096\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312d\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fcc\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua61f\ua62a-\ua62b\ua640-\ua66e\ua67f-\ua697\ua6a0-\ua6ef\ua717-\ua71f\ua722-\ua788\ua78b-\ua78e\ua790-\ua793\ua7a0-\ua7aa\ua7f8-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua88
2-\ua8b3\ua8f2-\ua8f7\ua8fb\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9cf\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa76\uaa7a\uaa80-\uaaaf\uaab1\uaab5-\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadd\uaae0-\uaaea\uaaf2-\uaaf4\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufc5d\ufc64-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdf9\ufe71\ufe73\ufe77\ufe79\ufe7b\ufe7d\ufe7f-\ufefc\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d\uffa0-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc'
if sys.maxunicode > 0xFFFF:
# non-BMP characters, use only on wide Unicode builds
Cf += u'\U000110bd\U0001d173-\U0001d17a\U000e0001\U000e0020-\U000e007f'
Cn += u'\U0001000c\U00010027\U0001003b\U0001003e\U0001004e-\U0001004f\U0001005e-\U0001007f\U000100fb-\U000100ff\U00010103-\U00010106\U00010134-\U00010136\U0001018b-\U0001018f\U0001019c-\U000101cf\U000101fe-\U0001027f\U0001029d-\U0001029f\U000102d1-\U000102ff\U0001031f\U00010324-\U0001032f\U0001034b-\U0001037f\U0001039e\U000103c4-\U000103c7\U000103d6-\U000103ff\U0001049e-\U0001049f\U000104aa-\U000107ff\U00010806-\U00010807\U00010809\U00010836\U00010839-\U0001083b\U0001083d-\U0001083e\U00010856\U00010860-\U000108ff\U0001091c-\U0001091e\U0001093a-\U0001093e\U00010940-\U0001097f\U000109b8-\U000109bd\U000109c0-\U000109ff\U00010a04\U00010a07-\U00010a0b\U00010a14\U00010a18\U00010a34-\U00010a37\U00010a3b-\U00010a3e\U00010a48-\U00010a4f\U00010a59-\U00010a5f\U00010a80-\U00010aff\U00010b36-\U00010b38\U00010b56-\U00010b57\U00010b73-\U00010b77\U00010b80-\U00010bff\U00010c49-\U00010e5f\U00010e7f-\U00010fff\U0001104e-\U00011051\U00011070-\U0001107f\U000110c2-\U000110cf\U000110e9-\U000110ef\U000110fa-\U000110ff\U00011135\U00011144-\U0001117f\U000111c9-\U000111cf\U000111da-\U0001167f\U000116b8-\U000116bf\U000116ca-\U00011fff\U0001236f-\U000123ff\U00012463-\U0001246f\U00012474-\U00012fff\U0001342f-\U000167ff\U00016a39-\U00016eff\U00016f45-\U00016f4f\U00016f7f-\U00016f8e\U00016fa0-\U0001afff\U0001b002-\U0001cfff\U0001d0f6-\U0001d0ff\U0001d127-\U0001d128\U0001d1de-\U0001d1ff\U0001d246-\U0001d2ff\U0001d357-\U0001d35f\U0001d372-\U0001d3ff\U0001d455\U0001d49d\U0001d4a0-\U0001d4a1\U0001d4a3-\U0001d4a4\U0001d4a7-\U0001d4a8\U0001d4ad\U0001d4ba\U0001d4bc\U0001d4c4\U0001d506\U0001d50b-\U0001d50c\U0001d515\U0001d51d\U0001d53a\U0001d53f\U0001d545\U0001d547-\U0001d549\U0001d551\U0001d6a6-\U0001d6a7\U0001d7cc-\U0001d7cd\U0001d800-\U0001edff\U0001ee04\U0001ee20\U0001ee23\U0001ee25-\U0001ee26\U0001ee28\U0001ee33\U0001ee38\U0001ee3a\U0001ee3c-\U0001ee41\U0001ee43-\U0001ee46\U0001ee48\U0001ee4a\U0001ee4c\U0001ee50\U0001ee53\U0001ee55-\U0001ee56\U0001ee58\U0001ee5a\U0001ee5c\U0001ee5e\U0001ee60\U0001ee63\U0001ee65-\U0001ee66\U0001ee6b\U0001ee73\U0001ee78\U0001ee7d\U0001ee7f\U0001ee8a\U0001ee9c-\U0001eea0\U0001eea4\U0001eeaa\U0001eebc-\U0001eeef\U0001eef2-\U0001efff\U0001f02c-\U0001f02f\U0001f094-\U0001f09f\U0001f0af-\U0001f0b0\U0001f0bf-\U0001f0c0\U0001f0d0\U0001f0e0-\U0001f0ff\U0001f10b-\U0001f10f\U0001f12f\U0001f16c-\U0001f16f\U0001f19b-\U0001f1e5\U0001f203-\U0001f20f\U0001f23b-\U0001f23f\U0001f249-\U0001f24f\U0001f252-\U0001f2ff\U0001f321-\U0001f32f\U0001f336\U0001f37d-\U0001f37f\U0001f394-\U0001f39f\U0001f3c5\U0001f3cb-\U0001f3df\U0001f3f1-\U0001f3ff\U0001f43f\U0001f441\U0001f4f8\U0001f4fd-\U0001f4ff\U0001f53e-\U0001f53f\U0001f544-\U0001f54f\U0001f568-\U0001f5fa\U0001f641-\U0001f644\U0001f650-\U0001f67f\U0001f6c6-\U0001f6ff\U0001f774-\U0001ffff\U0002a6d7-\U0002a6ff\U0002b735-\U0002b73f\U0002b81e-\U0002f7ff\U0002fa1e-\U000e0000\U000e0002-\U000e001f\U000e0080-\U000e00ff\U000e01f0-\U000effff\U000ffffe-\U000fffff\U0010fffe-\U0010ffff'
Co += u'\U000f0000-\U000ffffd\U00100000-\U0010fffd'
Ll += u'\U00010428-\U0001044f\U0001d41a-\U0001d433\U0001d44e-\U0001d454\U0001d456-\U0001d467\U0001d482-\U0001d49b\U0001d4b6-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d4cf\U0001d4ea-\U0001d503\U0001d51e-\U0001d537\U0001d552-\U0001d56b\U0001d586-\U0001d59f\U0001d5ba-\U0001d5d3\U0001d5ee-\U0001d607\U0001d622-\U0001d63b\U0001d656-\U0001d66f\U0001d68a-\U0001d6a5\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6e1\U0001d6fc-\U0001d714\U0001d716-\U0001d71b\U0001d736-\U0001d74e\U0001d750-\U0001d755\U0001d770-\U0001d788\U0001d78a-\U0001d78f\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7c9\U0001d7cb'
Lm += u'\U00016f93-\U00016f9f'
Lo += u'\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010280-\U0001029c\U000102a0-\U000102d0\U00010300-\U0001031e\U00010330-\U00010340\U00010342-\U00010349\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U00010450-\U0001049d\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00\U00010a10-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a33\U00010a60-\U00010a7c\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010c00-\U00010c48\U00011003-\U00011037\U00011083-\U000110af\U000110d0-\U000110e8\U00011103-\U00011126\U00011183-\U000111b2\U000111c1-\U000111c4\U00011680-\U000116aa\U00012000-\U0001236e\U00013000-\U0001342e\U00016800-\U00016a38\U00016f00-\U00016f44\U00016f50\U0001b000-\U0001b001\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002f800-\U0002fa1d'
Lu += u'\U00010400-\U00010427\U0001d400-\U0001d419\U0001d434-\U0001d44d\U0001d468-\U0001d481\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b5\U0001d4d0-\U0001d4e9\U0001d504-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d538-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d56c-\U0001d585\U0001d5a0-\U0001d5b9\U0001d5d4-\U0001d5ed\U0001d608-\U0001d621\U0001d63c-\U0001d655\U0001d670-\U0001d689\U0001d6a8-\U0001d6c0\U0001d6e2-\U0001d6fa\U0001d71c-\U0001d734\U0001d756-\U0001d76e\U0001d790-\U0001d7a8\U0001d7ca'
Mc += u'\U00011000\U00011002\U00011082\U000110b0-\U000110b2\U000110b7-\U000110b8\U0001112c\U00011182\U000111b3-\U000111b5\U000111bf-\U000111c0\U000116ac\U000116ae-\U000116af\U000116b6\U00016f51-\U00016f7e\U0001d165-\U0001d166\U0001d16d-\U0001d172'
Mn += u'\U000101fd\U00010a01-\U00010a03\U00010a05-\U00010a06\U00010a0c-\U00010a0f\U00010a38-\U00010a3a\U00010a3f\U00011001\U00011038-\U00011046\U00011080-\U00011081\U000110b3-\U000110b6\U000110b9-\U000110ba\U00011100-\U00011102\U00011127-\U0001112b\U0001112d-\U00011134\U00011180-\U00011181\U000111b6-\U000111be\U000116ab\U000116ad\U000116b0-\U000116b5\U000116b7\U00016f8f-\U00016f92\U0001d167-\U0001d169\U0001d17b-\U0001d182\U0001d185-\U0001d18b\U0001d1aa-\U0001d1ad\U0001d242-\U0001d244\U000e0100-\U000e01ef'
Nd += u'\U000104a0-\U000104a9\U00011066-\U0001106f\U000110f0-\U000110f9\U00011136-\U0001113f\U000111d0-\U000111d9\U000116c0-\U000116c9\U0001d7ce-\U0001d7ff'
Nl += u'\U00010140-\U00010174\U00010341\U0001034a\U000103d1-\U000103d5\U00012400-\U00012462'
No += u'\U00010107-\U00010133\U00010175-\U00010178\U0001018a\U00010320-\U00010323\U00010858-\U0001085f\U00010916-\U0001091b\U00010a40-\U00010a47\U00010a7d-\U00010a7e\U00010b58-\U00010b5f\U00010b78-\U00010b7f\U00010e60-\U00010e7e\U00011052-\U00011065\U0001d360-\U0001d371\U0001f100-\U0001f10a'
Po += u'\U00010100-\U00010102\U0001039f\U000103d0\U00010857\U0001091f\U0001093f\U00010a50-\U00010a58\U00010a7f\U00010b39-\U00010b3f\U00011047-\U0001104d\U000110bb-\U000110bc\U000110be-\U000110c1\U00011140-\U00011143\U000111c5-\U000111c8\U00012470-\U00012473'
Sm += u'\U0001d6c1\U0001d6db\U0001d6fb\U0001d715\U0001d735\U0001d74f\U0001d76f\U0001d789\U0001d7a9\U0001d7c3\U0001eef0-\U0001eef1'
So += u'\U00010137-\U0001013f\U00010179-\U00010189\U00010190-\U0001019b\U000101d0-\U000101fc\U0001d000-\U0001d0f5\U0001d100-\U0001d126\U0001d129-\U0001d164\U0001d16a-\U0001d16c\U0001d183-\U0001d184\U0001d18c-\U0001d1a9\U0001d1ae-\U0001d1dd\U0001d200-\U0001d241\U0001d245\U0001d300-\U0001d356\U0001f000-\U0001f02b\U0001f030-\U0001f093\U0001f0a0-\U0001f0ae\U0001f0b1-\U0001f0be\U0001f0c1-\U0001f0cf\U0001f0d1-\U0001f0df\U0001f110-\U0001f12e\U0001f130-\U0001f16b\U0001f170-\U0001f19a\U0001f1e6-\U0001f202\U0001f210-\U0001f23a\U0001f240-\U0001f248\U0001f250-\U0001f251\U0001f300-\U0001f320\U0001f330-\U0001f335\U0001f337-\U0001f37c\U0001f380-\U0001f393\U0001f3a0-\U0001f3c4\U0001f3c6-\U0001f3ca\U0001f3e0-\U0001f3f0\U0001f400-\U0001f43e\U0001f440\U0001f442-\U0001f4f7\U0001f4f9-\U0001f4fc\U0001f500-\U0001f53d\U0001f540-\U0001f543\U0001f550-\U0001f567\U0001f5fb-\U0001f640\U0001f645-\U0001f64f\U0001f680-\U0001f6c5\U0001f700-\U0001f773'
xid_continue += u'\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010140-\U00010174\U000101fd\U00010280-\U0001029c\U000102a0-\U000102d0\U00010300-\U0001031e\U00010330-\U0001034a\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U000103d1-\U000103d5\U00010400-\U0001049d\U000104a0-\U000104a9\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00-\U00010a03\U00010a05-\U00010a06\U00010a0c-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a33\U00010a38-\U00010a3a\U00010a3f\U00010a60-\U00010a7c\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010c00-\U00010c48\U00011000-\U00011046\U00011066-\U0001106f\U00011080-\U000110ba\U000110d0-\U000110e8\U000110f0-\U000110f9\U00011100-\U00011134\U00011136-\U0001113f\U00011180-\U000111c4\U000111d0-\U000111d9\U00011680-\U000116b7\U000116c0-\U000116c9\U00012000-\U0001236e\U00012400-\U00012462\U00013000-\U0001342e\U00016800-\U00016a38\U00016f00-\U00016f44\U00016f50-\U00016f7e\U00016f8f-\U00016f9f\U0001b000-\U0001b001\U0001d165-\U0001d169\U0001d16d-\U0001d172\U0001d17b-\U0001d182\U0001d185-\U0001d18b\U0001d1aa-\U0001d1ad\U0001d242-\U0001d244\U0001d400-\U0001d454\U0001d456-\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d51e-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d552-\U0001d6a5\U0001d6a8-\U0001d6c0\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6fa\U0001d6fc-\U0001d714\U0001d716-\U0001d734\U0001d736-\U0001d74e\U0001d750-\U0001d76e\U0001d770-\U0001d788\U0001d78a-\U0001d7a8\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7cb\U0001d7ce-\U0001d7ff\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002f800-\U0002fa1d\U000e0100-\U000e01ef'
xid_start += u'\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010140-\U00010174\U00010280-\U0001029c\U000102a0-\U000102d0\U00010300-\U0001031e\U00010330-\U0001034a\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U000103d1-\U000103d5\U00010400-\U0001049d\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00\U00010a10-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a33\U00010a60-\U00010a7c\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010c00-\U00010c48\U00011003-\U00011037\U00011083-\U000110af\U000110d0-\U000110e8\U00011103-\U00011126\U00011183-\U000111b2\U000111c1-\U000111c4\U00011680-\U000116aa\U00012000-\U0001236e\U00012400-\U00012462\U00013000-\U0001342e\U00016800-\U00016a38\U00016f00-\U00016f44\U00016f50\U00016f93-\U00016f9f\U0001b000-\U0001b001\U0001d400-\U0001d454\U0001d456-\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d51e-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d552-\U0001d6a5\U0001d6a8-\U0001d6c0\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6fa\U0001d6fc-\U0001d714\U0001d716-\U0001d734\U0001d736-\U0001d74e\U0001d750-\U0001d76e\U0001d770-\U0001d788\U0001d78a-\U0001d7a8\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7cb\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002f800-\U0002fa1d'
cats = ['Cc', 'Cf', 'Cn', 'Co', 'Cs', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu', 'Mc', 'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps', 'Sc', 'Sk', 'Sm', 'So', 'Zl', 'Zp', 'Zs']
# Generated from unidata 6.3.0
def combine(*args):
return u''.join(globals()[cat] for cat in args)
def allexcept(*args):
newcats = cats[:]
for arg in args:
newcats.remove(arg)
return u''.join(globals()[cat] for cat in newcats)
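# Usage sketch (illustrative only; nothing below ships with the generated
# data): the category strings above are regex character-class bodies, so
# they can be spliced straight into patterns.
#
#     import re
#     ident = re.compile(u'[%s][%s]*' % (xid_start, xid_continue))
#     ident.match(u'\u00e9tat')      # matches; identifiers may contain é
#     letters = combine('Lu', 'Ll', 'Lt', 'Lm', 'Lo')   # every letter class
#     no_seps = allexcept('Zl', 'Zp', 'Zs')             # all but separators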
def _handle_runs(char_list):
buf = []
for c in char_list:
if len(c) == 1:
if buf and buf[-1][1] == chr(ord(c)-1):
buf[-1] = (buf[-1][0], c)
else:
buf.append((c, c))
else:
buf.append((c, c))
for a, b in buf:
if a == b:
yield a
else:
yield u'%s-%s' % (a, b)
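# For instance, feeding _handle_runs a sorted character list collapses
# consecutive codepoints into regex ranges (illustrative):
#
#     list(_handle_runs([u'a', u'b', u'c', u'x']))  ->  [u'a-c', u'x']
#
# Pre-escaped metacharacters (length-2 items such as u'\\-') are emitted
# as-is and never merged into a run.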
if __name__ == '__main__':
import unicodedata
# we need Py3 for the determination of the XID_* properties
if sys.version_info[:2] < (3, 3):
raise RuntimeError('this file must be regenerated with Python 3.3+')
categories_bmp = {'xid_start': [], 'xid_continue': []}
categories_nonbmp = {'xid_start': [], 'xid_continue': []}
with open(__file__) as fp:
content = fp.read()
header = content[:content.find('Cc =')]
footer = content[content.find("def combine("):]
for code in range(0x110000):
c = chr(code)
cat = unicodedata.category(c)
if ord(c) == 0xdc00:
            # Hack to avoid combining this character with the preceding high
            # surrogate, 0xdbff, when doing a repr.
c = u'\\' + c
elif ord(c) in (0x2d, 0x5b, 0x5c, 0x5d, 0x5e):
# Escape regex metachars.
c = u'\\' + c
cat_dic = categories_bmp if code < 0x10000 else categories_nonbmp
cat_dic.setdefault(cat, []).append(c)
# XID_START and XID_CONTINUE are special categories used for matching
# identifiers in Python 3.
if c.isidentifier():
cat_dic['xid_start'].append(c)
if ('a' + c).isidentifier():
cat_dic['xid_continue'].append(c)
with open(__file__, 'w') as fp:
fp.write(header)
for cat in sorted(categories_bmp):
val = u''.join(_handle_runs(categories_bmp[cat]))
if cat == 'Cs':
# Jython can't handle isolated surrogates
fp.write("""\
try:
Cs = eval(r"u%s")
except UnicodeDecodeError:
Cs = '' # Jython can't handle isolated surrogates\n\n""" % ascii(val))
else:
fp.write('%s = u%a\n\n' % (cat, val))
fp.write('if sys.maxunicode > 0xFFFF:\n')
fp.write(' # non-BMP characters, use only on wide Unicode builds\n')
for cat in sorted(categories_nonbmp):
# no special case for Cs needed, since there are no surrogates
# in the higher planes
val = u''.join(_handle_runs(categories_nonbmp[cat]))
fp.write(' %s += u%a\n\n' % (cat, val))
cats = sorted(categories_bmp)
cats.remove('xid_start')
cats.remove('xid_continue')
fp.write('cats = %r\n\n' % cats)
fp.write('# Generated from unidata %s\n\n' % (unicodedata.unidata_version,))
fp.write(footer)
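# Regeneration sketch: running this module directly under Python 3.3+
# rewrites the file in place against the interpreter's unicodedata tables
# (the invocation below is illustrative; adjust the path to wherever this
# module lives):
#
#     python3 path/to/this_module.py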
| bsd-3-clause |
srjoglekar246/sympy | doc/ext/docscrape_sphinx.py | 2 | 7957 | import re, inspect, textwrap, pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
    def _str_signature(self):
        # Signature output is intentionally suppressed here; the early
        # return makes the branch below unreachable.
        return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param,param_type,desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc,8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
## Lines that are commented out are used to make the
## autosummary:: table. Since SymPy does not use the
## autosummary:: functionality, it is easiest to just comment it
## out.
#autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
#if not self._obj or hasattr(self._obj, param):
# autosum += [" %s%s" % (prefix, param)]
#else:
others.append((param, param_type, desc))
#if autosum:
# out += ['.. autosummary::', ' :toctree:', '']
# out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
            # Latex collects all references into a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Other Parameters',
'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for s in self._other_keys:
out += self._str_section(s)
out += self._str_member_list('Attributes')
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
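# Usage sketch (illustrative; numpy.trapz stands in for any documented
# callable and is not a dependency of this module):
#
#     import numpy
#     doc = get_doc_object(numpy.trapz, config={'use_plots': False})
#     rest_source = str(doc)   # the ReST built by SphinxDocString.__str__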
| bsd-3-clause |
tianyi33/simple_blog | django/conf/locale/sl/formats.py | 106 | 1745 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd. F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j. F Y. H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j. M. Y'
SHORT_DATETIME_FORMAT = 'j.n.Y. H:i'
FIRST_DAY_OF_WEEK = 0
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%d-%m-%Y', # '25-10-2006'
'%d. %m. %Y', '%d. %m. %y', # '25. 10. 2006', '25. 10. 06'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
'%d-%m-%Y %H:%M:%S', # '25-10-2006 14:30:59'
'%d-%m-%Y %H:%M', # '25-10-2006 14:30'
'%d-%m-%Y', # '25-10-2006'
'%d. %m. %Y %H:%M:%S', # '25. 10. 2006 14:30:59'
'%d. %m. %Y %H:%M', # '25. 10. 2006 14:30'
'%d. %m. %Y', # '25. 10. 2006'
'%d. %m. %y %H:%M:%S', # '25. 10. 06 14:30:59'
'%d. %m. %y %H:%M', # '25. 10. 06 14:30'
'%d. %m. %y', # '25. 10. 06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
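# Usage sketch (Django's own formats API; the sample date is invented):
# the *_FORMAT strings drive localized rendering while the *_INPUT_FORMATS
# tuples drive form-field parsing when this locale is active.
#
#     import datetime
#     from django.utils.formats import date_format
#     date_format(datetime.date(2006, 10, 25), 'DATE_FORMAT')
#     # -> u'25. oktober 2006' (month name taken from the locale data)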
| mit |
citrix-openstack-build/python-neutronclient | neutronclient/neutron/v2_0/credential.py | 6 | 2705 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
from neutronclient.neutron import v2_0 as neutronV20
class ListCredential(neutronV20.ListCommand):
"""List credentials that belong to a given tenant."""
resource = 'credential'
log = logging.getLogger(__name__ + '.ListCredential')
_formatters = {}
list_columns = ['credential_id', 'credential_name', 'user_name',
'password', 'type']
class ShowCredential(neutronV20.ShowCommand):
"""Show information of a given credential."""
resource = 'credential'
log = logging.getLogger(__name__ + '.ShowCredential')
allow_names = False
class CreateCredential(neutronV20.CreateCommand):
"""Creates a credential."""
resource = 'credential'
log = logging.getLogger(__name__ + '.CreateCredential')
def add_known_arguments(self, parser):
parser.add_argument(
'credential_name',
            help='Name/IP address for the credential')
parser.add_argument(
'credential_type',
help='Type of the Credential')
parser.add_argument(
'--username',
help='Username for the credential')
parser.add_argument(
'--password',
help='Password for the credential')
def args2body(self, parsed_args):
body = {'credential': {
'credential_name': parsed_args.credential_name}}
if parsed_args.credential_type:
body['credential'].update({'type':
parsed_args.credential_type})
if parsed_args.username:
body['credential'].update({'user_name':
parsed_args.username})
if parsed_args.password:
body['credential'].update({'password':
parsed_args.password})
return body
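    # Illustration (the CLI command name is assumed, not defined here): an
    # invocation along the lines of
    #     <prog> credential-create --username admin --password secret CRED1 password
    # would make args2body return
    #     {'credential': {'credential_name': 'CRED1', 'type': 'password',
    #                     'user_name': 'admin', 'password': 'secret'}}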
class DeleteCredential(neutronV20.DeleteCommand):
"""Delete a given credential."""
log = logging.getLogger(__name__ + '.DeleteCredential')
resource = 'credential'
allow_names = False
| apache-2.0 |
ocadotechnology/boto | tests/integration/ses/test_connection.py | 113 | 1495 | from tests.unit import unittest
from boto.ses.connection import SESConnection
from boto.ses import exceptions
class SESConnectionTest(unittest.TestCase):
ses = True
def setUp(self):
self.ses = SESConnection()
def test_get_dkim_attributes(self):
response = self.ses.get_identity_dkim_attributes(['example.com'])
        # Verify we get the structure we expect; we don't care about the
        # values.
self.assertTrue('GetIdentityDkimAttributesResponse' in response)
self.assertTrue('GetIdentityDkimAttributesResult' in
response['GetIdentityDkimAttributesResponse'])
self.assertTrue(
'DkimAttributes' in response['GetIdentityDkimAttributesResponse']
['GetIdentityDkimAttributesResult'])
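        # For reference, the nesting asserted above looks roughly like this
        # (values elided; shape follows SES's GetIdentityDkimAttributes):
        #     {'GetIdentityDkimAttributesResponse':
        #         {'GetIdentityDkimAttributesResult':
        #             {'DkimAttributes': {...}}}}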
def test_set_identity_dkim_enabled(self):
        # This api call should fail because we have not verified the domain,
        # so we can test that it at least fails as we expect.
with self.assertRaises(exceptions.SESIdentityNotVerifiedError):
self.ses.set_identity_dkim_enabled('example.com', True)
def test_verify_domain_dkim(self):
        # This api call should fail because we have not confirmed the domain,
        # so we can test that it at least fails as we expect.
with self.assertRaises(exceptions.SESDomainNotConfirmedError):
self.ses.verify_domain_dkim('example.com')
if __name__ == '__main__':
unittest.main()
| mit |
yogeshraheja/yoggan_factory | images/bundle-centos-mongodb/library/mongodb_authent.py | 8 | 11801 | #!/usr/bin/python
# (c) 2012, Elliott Foster <elliott@fourkitchens.com>
# Sponsored by Four Kitchens http://fourkitchens.com.
# (c) 2014, Epic Games, Inc.
# (c) 2015, Orange, Inc. <alexis.lacroix@orange.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: mongodb_authent
short_description: Adds or removes a user from a MongoDB database.
description:
- Adds or removes a user from a MongoDB database.
version_added: "1.1"
options:
login_user:
description:
- The username used to authenticate with
required: false
default: null
login_password:
description:
- The password used to authenticate with
required: false
default: null
login_host:
description:
- The host running the database
required: false
default: localhost
login_port:
description:
- The port to connect to
required: false
default: 27017
login_database:
version_added: "2.0"
description:
- The database where login credentials are stored
required: false
default: null
replica_set:
version_added: "1.6"
description:
- Replica set to connect to (automatically connects to primary for writes)
required: false
default: null
database:
description:
- The name of the database to add/remove the user from
required: true
name:
description:
- The name of the user to add or remove
required: true
default: null
aliases: [ 'user' ]
password:
description:
- The password to use for the user
required: false
default: null
ssl:
version_added: "1.8"
description:
- Whether to use an SSL connection when connecting to the database
default: False
roles:
version_added: "1.3"
description:
- "The database user roles valid values are one or more of the following: read, 'readWrite', 'dbAdmin', 'userAdmin', 'clusterAdmin', 'readAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase', 'dbAdminAnyDatabase'"
- This param requires mongodb 2.4+ and pymongo 2.5+
required: false
default: "readWrite"
state:
state:
description:
- The database user state
required: false
default: present
choices: [ 'present', 'absent', 'admin' ]
update_password:
required: false
default: always
choices: ['always', 'on_create']
version_added: "2.1"
description:
- C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users.
notes:
- Requires the pymongo Python package on the remote host, version 2.4.2+. This
can be installed using pip or the OS package manager. @see http://api.mongodb.org/python/current/installation.html
requirements: [ "pymongo" ]
author: "Elliott Foster (@elliotttf)"
'''
EXAMPLES = '''
# Create 'burgers' database user with name 'bob' and password '12345'.
- mongodb_authent: database=burgers name=bob password=12345 state=present
# Create a database user via SSL (MongoDB must be compiled with the SSL option and configured properly)
- mongodb_authent: database=burgers name=bob password=12345 state=present ssl=True
# Delete 'burgers' database user with name 'bob'.
- mongodb_authent: database=burgers name=bob state=absent
# Define more users with various specific roles (if not defined, no roles are assigned and the user will be added in the pre-MongoDB 2.2 style)
- mongodb_authent: database=burgers name=ben password=12345 roles='read' state=present
- mongodb_authent: database=burgers name=jim password=12345 roles='readWrite,dbAdmin,userAdmin' state=present
- mongodb_authent: database=burgers name=joe password=12345 roles='readWriteAnyDatabase' state=present
# add a user to database in a replica set, the primary server is automatically discovered and written to
- mongodb_authent: database=burgers name=bob replica_set=blecher password=12345 roles='readWriteAnyDatabase' state=present
'''
import ConfigParser
from distutils.version import LooseVersion
try:
from pymongo.errors import ConnectionFailure
from pymongo.errors import OperationFailure
from pymongo.errors import NotMasterError
from pymongo import version as PyMongoVersion
from pymongo import MongoClient
except ImportError:
try: # for older PyMongo 2.2
from pymongo import Connection as MongoClient
except ImportError:
pymongo_found = False
else:
pymongo_found = True
else:
pymongo_found = True
# =========================================
# MongoDB module specific support methods.
#
def check_compatibility(module, client):
srv_info = client.server_info()
if LooseVersion(srv_info['version']) >= LooseVersion('3.0') and LooseVersion(PyMongoVersion) <= LooseVersion('3.0'):
module.fail_json(msg=' (Note: you must use pymongo 3.0+ with MongoDB >= 3.0)')
elif LooseVersion(srv_info['version']) >= LooseVersion('2.6') and LooseVersion(PyMongoVersion) <= LooseVersion('2.7'):
module.fail_json(msg=' (Note: you must use pymongo 2.7.x-2.9.x with MongoDB 2.6)')
elif LooseVersion(PyMongoVersion) <= LooseVersion('2.5'):
module.fail_json(msg=' (Note: you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param)')
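# Version matrix enforced above (summary): MongoDB >= 3.0 requires a
# pymongo release newer than 3.0, MongoDB >= 2.6 requires pymongo
# 2.7.x-2.9.x, and pymongo releases at or below 2.5 are rejected outright.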
def user_find(client, user):
for mongo_user in client["admin"].system.users.find():
if mongo_user['user'] == user:
return mongo_user
return False
def user_add(module, client, db_name, user, password, roles):
    # pymongo's add_user is a _create_or_update_user, so we won't know if the
    # user was created or updated without reproducing a lot of the logic in
    # pymongo's database.py
db = client[db_name]
    if roles is None:
        db.add_user(user, password, False)
        return True
else:
try:
db.add_user(user, password, None, roles=roles)
return True
except OperationFailure, e:
err_msg = "[function user_add] " + str(e)
module.fail_json(msg=err_msg)
except NotMasterError, e:
pass
return False
def user_remove(module, client, db_name, user):
exists = user_find(client, user)
if exists:
db = client[db_name]
db.remove_user(user)
else:
module.exit_json(changed=False, user=user)
def load_mongocnf():
config = ConfigParser.RawConfigParser()
mongocnf = os.path.expanduser('~/.mongodb.cnf')
try:
config.readfp(open(mongocnf))
creds = dict(
user=config.get('client', 'user'),
password=config.get('client', 'pass')
)
except (ConfigParser.NoOptionError, IOError):
return False
return creds
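# A minimal ~/.mongodb.cnf consumed above looks like this (sketch; the
# values are placeholders):
#
#     [client]
#     user = admin
#     pass = secret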
def authenticate(client, login_user, login_password, login_database):
try:
client.admin.authenticate(login_user, login_password)
except OperationFailure:
pass
# =========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec = dict(
login_user=dict(default=None),
login_password=dict(default=None),
login_host=dict(default='localhost'),
login_port=dict(default='27017'),
login_database=dict(default=None),
replica_set=dict(default=None),
database=dict(required=True, aliases=['db']),
name=dict(required=True, aliases=['user']),
password=dict(aliases=['pass']),
ssl=dict(default=False),
roles=dict(default=None, type='list'),
state=dict(default='present', choices=['absent', 'present', 'admin']),
update_password=dict(default='always', choices=['always', 'on_create']),
)
)
if not pymongo_found:
module.fail_json(msg='the python pymongo module is required')
login_user = module.params['login_user']
login_password = module.params['login_password']
login_host = module.params['login_host']
login_port = module.params['login_port']
login_database = module.params['login_database']
replica_set = module.params['replica_set']
db_name = module.params['database']
user = module.params['name']
password = module.params['password']
ssl = module.params['ssl']
roles = module.params['roles']
state = module.params['state']
update_password = module.params['update_password']
try:
if replica_set:
client = MongoClient(login_host, int(login_port), replicaset=replica_set, ssl=ssl)
else:
client = MongoClient(login_host, int(login_port), ssl=ssl)
        # Fail only when exactly one of the two login arguments is given;
        # leaving both unset falls through to the localhost exception below.
        if (login_user is None) != (login_password is None):
            module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided')
if login_user is not None and login_password is not None:
authenticate(client, login_user, login_password, login_database)
elif LooseVersion(PyMongoVersion) >= LooseVersion('3.0'):
if db_name != "admin":
module.fail_json(msg='The localhost login exception only allows the first admin account to be created')
#else: this has to be the first admin user added
except ConnectionFailure, e:
module.fail_json(msg='unable to connect to database: %s' % str(e))
check_compatibility(module, client)
change_state=False
if state == 'admin':
if password is None and update_password == 'always':
module.fail_json(msg='password parameter required when adding a user unless update_password is set to on_create')
try:
if update_password == 'on_create' and user_find(client, user):
addU=False
else:
addU=True
except OperationFailure:
addU=True
if addU:
try:
if user_add(module, client, db_name, user, password, roles):
change_state=True
except OperationFailure, e:
module.fail_json(msg='Unable to add or update user: %s' % str(e))
if state == 'present':
if password is None and update_password == 'always':
module.fail_json(msg='password parameter required when adding a user unless update_password is set to on_create')
if update_password != 'always' and user_find(client, user):
password = None
try:
if user_add(module, client, db_name, user, password, roles):
change_state=True
except OperationFailure, e:
module.fail_json(msg='Unable to add or update user: %s' % str(e))
elif state == 'absent':
try:
user_remove(module, client, db_name, user)
change_state=True
except OperationFailure, e:
module.fail_json(msg='Unable to remove user: %s' % str(e))
module.exit_json(changed=change_state, user=user)
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
vijayendrabvs/ssl-python-neutronclient | neutronclient/neutron/v2_0/securitygroup.py | 3 | 9826 | # Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import argparse
import logging
from neutronclient.neutron import v2_0 as neutronV20
from neutronclient.openstack.common.gettextutils import _
class ListSecurityGroup(neutronV20.ListCommand):
"""List security groups that belong to a given tenant."""
resource = 'security_group'
log = logging.getLogger(__name__ + '.ListSecurityGroup')
list_columns = ['id', 'name', 'description']
pagination_support = True
sorting_support = True
class ShowSecurityGroup(neutronV20.ShowCommand):
"""Show information of a given security group."""
resource = 'security_group'
log = logging.getLogger(__name__ + '.ShowSecurityGroup')
allow_names = True
class CreateSecurityGroup(neutronV20.CreateCommand):
"""Create a security group."""
resource = 'security_group'
log = logging.getLogger(__name__ + '.CreateSecurityGroup')
def add_known_arguments(self, parser):
parser.add_argument(
'name', metavar='NAME',
help=_('Name of security group'))
parser.add_argument(
'--description',
help=_('Description of security group'))
def args2body(self, parsed_args):
body = {'security_group': {
'name': parsed_args.name}}
if parsed_args.description:
body['security_group'].update(
{'description': parsed_args.description})
if parsed_args.tenant_id:
body['security_group'].update({'tenant_id': parsed_args.tenant_id})
return body
class DeleteSecurityGroup(neutronV20.DeleteCommand):
"""Delete a given security group."""
log = logging.getLogger(__name__ + '.DeleteSecurityGroup')
resource = 'security_group'
allow_names = True
class UpdateSecurityGroup(neutronV20.UpdateCommand):
"""Update a given security group."""
log = logging.getLogger(__name__ + '.UpdateSecurityGroup')
resource = 'security_group'
def add_known_arguments(self, parser):
parser.add_argument(
'--name',
help=_('Name of security group'))
parser.add_argument(
'--description',
help=_('Description of security group'))
def args2body(self, parsed_args):
body = {'security_group': {}}
if parsed_args.name:
body['security_group'].update(
{'name': parsed_args.name})
if parsed_args.description:
body['security_group'].update(
{'description': parsed_args.description})
return body
class ListSecurityGroupRule(neutronV20.ListCommand):
"""List security group rules that belong to a given tenant."""
resource = 'security_group_rule'
log = logging.getLogger(__name__ + '.ListSecurityGroupRule')
list_columns = ['id', 'security_group_id', 'direction', 'protocol',
'remote_ip_prefix', 'remote_group_id']
replace_rules = {'security_group_id': 'security_group',
'remote_group_id': 'remote_group'}
pagination_support = True
sorting_support = True
def get_parser(self, prog_name):
parser = super(ListSecurityGroupRule, self).get_parser(prog_name)
parser.add_argument(
'--no-nameconv', action='store_true',
help=_('Do not convert security group ID to its name'))
return parser
@staticmethod
def replace_columns(cols, rules, reverse=False):
if reverse:
rules = dict((rules[k], k) for k in rules.keys())
return [rules.get(col, col) for col in cols]
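    # Illustrative (not in the original file): with
    # rules={'security_group_id': 'security_group'},
    #   replace_columns(['id', 'security_group_id'], rules)
    # returns ['id', 'security_group'], while reverse=True applies the
    # inverted mapping, turning display names back into raw field names.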
def retrieve_list(self, parsed_args):
parsed_args.fields = self.replace_columns(parsed_args.fields,
self.replace_rules,
reverse=True)
return super(ListSecurityGroupRule, self).retrieve_list(parsed_args)
def extend_list(self, data, parsed_args):
if parsed_args.no_nameconv:
return
neutron_client = self.get_client()
search_opts = {'fields': ['id', 'name']}
if self.pagination_support:
page_size = parsed_args.page_size
if page_size:
search_opts.update({'limit': page_size})
sec_group_ids = set()
for rule in data:
for key in self.replace_rules:
sec_group_ids.add(rule[key])
search_opts.update({"id": sec_group_ids})
secgroups = neutron_client.list_security_groups(**search_opts)
secgroups = secgroups.get('security_groups', [])
sg_dict = dict([(sg['id'], sg['name'])
for sg in secgroups if sg['name']])
for rule in data:
for key in self.replace_rules:
rule[key] = sg_dict.get(rule[key], rule[key])
def setup_columns(self, info, parsed_args):
parsed_args.columns = self.replace_columns(parsed_args.columns,
self.replace_rules,
reverse=True)
        # NOTE(amotoki): the 2nd element of the tuple returned by
        # setup_columns() is a generator, so if you need to loop over it
        # more than once you must recreate the generator for the list to
        # display as expected.
info = super(ListSecurityGroupRule, self).setup_columns(info,
parsed_args)
cols = info[0]
if not parsed_args.no_nameconv:
cols = self.replace_columns(info[0], self.replace_rules)
parsed_args.columns = cols
return (cols, info[1])
class ShowSecurityGroupRule(neutronV20.ShowCommand):
"""Show information of a given security group rule."""
resource = 'security_group_rule'
log = logging.getLogger(__name__ + '.ShowSecurityGroupRule')
allow_names = False
class CreateSecurityGroupRule(neutronV20.CreateCommand):
"""Create a security group rule."""
resource = 'security_group_rule'
log = logging.getLogger(__name__ + '.CreateSecurityGroupRule')
def add_known_arguments(self, parser):
parser.add_argument(
'security_group_id', metavar='SECURITY_GROUP',
help=_('Security group name or id to add rule.'))
parser.add_argument(
'--direction',
default='ingress', choices=['ingress', 'egress'],
help=_('Direction of traffic: ingress/egress'))
parser.add_argument(
'--ethertype',
default='IPv4',
help=_('IPv4/IPv6'))
parser.add_argument(
'--protocol',
help=_('Protocol of packet'))
parser.add_argument(
'--port-range-min',
help=_('Starting port range'))
parser.add_argument(
'--port_range_min',
help=argparse.SUPPRESS)
parser.add_argument(
'--port-range-max',
help=_('Ending port range'))
parser.add_argument(
'--port_range_max',
help=argparse.SUPPRESS)
parser.add_argument(
'--remote-ip-prefix',
help=_('CIDR to match on'))
parser.add_argument(
'--remote_ip_prefix',
help=argparse.SUPPRESS)
parser.add_argument(
'--remote-group-id', metavar='REMOTE_GROUP',
help=_('Remote security group name or id to apply rule'))
parser.add_argument(
'--remote_group_id',
help=argparse.SUPPRESS)
def args2body(self, parsed_args):
_security_group_id = neutronV20.find_resourceid_by_name_or_id(
self.get_client(), 'security_group', parsed_args.security_group_id)
body = {'security_group_rule': {
'security_group_id': _security_group_id,
'direction': parsed_args.direction,
'ethertype': parsed_args.ethertype}}
if parsed_args.protocol:
body['security_group_rule'].update(
{'protocol': parsed_args.protocol})
if parsed_args.port_range_min:
body['security_group_rule'].update(
{'port_range_min': parsed_args.port_range_min})
if parsed_args.port_range_max:
body['security_group_rule'].update(
{'port_range_max': parsed_args.port_range_max})
if parsed_args.remote_ip_prefix:
body['security_group_rule'].update(
{'remote_ip_prefix': parsed_args.remote_ip_prefix})
if parsed_args.remote_group_id:
_remote_group_id = neutronV20.find_resourceid_by_name_or_id(
self.get_client(), 'security_group',
parsed_args.remote_group_id)
body['security_group_rule'].update(
{'remote_group_id': _remote_group_id})
if parsed_args.tenant_id:
body['security_group_rule'].update(
{'tenant_id': parsed_args.tenant_id})
return body
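    # Illustrative body produced by args2body (values are made up):
    #   {'security_group_rule': {'security_group_id': '<uuid>',
    #                            'direction': 'ingress',
    #                            'ethertype': 'IPv4',
    #                            'protocol': 'tcp',
    #                            'port_range_min': '80',
    #                            'port_range_max': '80'}}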
class DeleteSecurityGroupRule(neutronV20.DeleteCommand):
"""Delete a given security group rule."""
log = logging.getLogger(__name__ + '.DeleteSecurityGroupRule')
resource = 'security_group_rule'
allow_names = False
| apache-2.0 |
gtaylor/ansible | v2/ansible/playbook/base.py | 2 | 12371 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import uuid
from functools import partial
from inspect import getmembers
from io import FileIO
from six import iteritems, string_types
from jinja2.exceptions import UndefinedError
from ansible.errors import AnsibleParserError
from ansible.parsing import DataLoader
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.template import Templar
from ansible.utils.boolean import boolean
from ansible.utils.debug import debug
from ansible.template import template
class Base:
# connection/transport
_connection = FieldAttribute(isa='string')
_port = FieldAttribute(isa='int')
_remote_user = FieldAttribute(isa='string')
# vars and flags
_vars = FieldAttribute(isa='dict', default=dict())
_environment = FieldAttribute(isa='dict', default=dict())
_no_log = FieldAttribute(isa='bool', default=False)
def __init__(self):
# initialize the data loader and variable manager, which will be provided
# later when the object is actually loaded
self._loader = None
self._variable_manager = None
# every object gets a random uuid:
self._uuid = uuid.uuid4()
# and initialize the base attributes
self._initialize_base_attributes()
    # The following three functions are used to programmatically define data
# descriptors (aka properties) for the Attributes of all of the playbook
# objects (tasks, blocks, plays, etc).
#
# The function signature is a little strange because of how we define
# them. We use partial to give each method the name of the Attribute that
# it is for. Since partial prefills the positional arguments at the
# beginning of the function we end up with the first positional argument
# being allocated to the name instead of to the class instance (self) as
# normal. To deal with that we make the property name field the first
# positional argument and self the second arg.
#
# Because these methods are defined inside of the class, they get bound to
# the instance when the object is created. After we run partial on them
# and put the result back into the class as a property, they get bound
# a second time. This leads to self being placed in the arguments twice.
# To work around that, we mark the functions as @staticmethod so that the
# first binding to the instance doesn't happen.
@staticmethod
def _generic_g(prop_name, self):
method = "_get_attr_%s" % prop_name
if method in dir(self):
return getattr(self, method)()
return self._attributes[prop_name]
@staticmethod
def _generic_s(prop_name, self, value):
self._attributes[prop_name] = value
@staticmethod
def _generic_d(prop_name, self):
del self._attributes[prop_name]
def _get_base_attributes(self):
'''
Returns the list of attributes for this class (or any subclass thereof).
If the attribute name starts with an underscore, it is removed
'''
base_attributes = dict()
for (name, value) in getmembers(self.__class__):
if isinstance(value, Attribute):
if name.startswith('_'):
name = name[1:]
base_attributes[name] = value
return base_attributes
def _initialize_base_attributes(self):
# each class knows attributes set upon it, see Task.py for example
self._attributes = dict()
for (name, value) in self._get_base_attributes().items():
getter = partial(self._generic_g, name)
setter = partial(self._generic_s, name)
deleter = partial(self._generic_d, name)
# Place the property into the class so that cls.name is the
# property functions.
setattr(Base, name, property(getter, setter, deleter))
# Place the value into the instance so that the property can
            # process and hold that value.
setattr(self, name, value.default)
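    # Illustrative effect (not in the original): given a class-level
    # declaration such as `_port = FieldAttribute(isa='int')`, after
    # _initialize_base_attributes() runs, an instance supports plain
    # attribute access backed by self._attributes, e.g.:
    #   obj.port = 22
    #   obj.port     # -> 22, via Base._generic_g('port', obj)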
def preprocess_data(self, ds):
''' infrequently used method to do some pre-processing of legacy terms '''
for base_class in self.__class__.mro():
method = getattr(self, "_preprocess_data_%s" % base_class.__name__.lower(), None)
if method:
return method(ds)
return ds
def load_data(self, ds, variable_manager=None, loader=None):
''' walk the input datastructure and assign any values '''
assert ds is not None
# the variable manager class is used to manage and merge variables
# down to a single dictionary for reference in templating, etc.
self._variable_manager = variable_manager
# the data loader class is used to parse data from strings and files
if loader is not None:
self._loader = loader
else:
self._loader = DataLoader()
if isinstance(ds, string_types) or isinstance(ds, FileIO):
ds = self._loader.load(ds)
# call the preprocess_data() function to massage the data into
# something we can more easily parse, and then call the validation
# function on it to ensure there are no incorrect key values
ds = self.preprocess_data(ds)
self._validate_attributes(ds)
# Walk all attributes in the class.
#
# FIXME: we currently don't do anything with private attributes but
# may later decide to filter them out of 'ds' here.
for name in self._get_base_attributes():
# copy the value over unless a _load_field method is defined
if name in ds:
method = getattr(self, '_load_%s' % name, None)
if method:
self._attributes[name] = method(name, ds[name])
else:
self._attributes[name] = ds[name]
# run early, non-critical validation
self.validate()
# cache the datastructure internally
setattr(self, '_ds', ds)
# return the constructed object
return self
def get_ds(self):
try:
return getattr(self, '_ds')
except AttributeError:
return None
def get_loader(self):
return self._loader
def get_variable_manager(self):
return self._variable_manager
def _validate_attributes(self, ds):
'''
Ensures that there are no keys in the datastructure which do
not map to attributes for this object.
'''
valid_attrs = frozenset(name for name in self._get_base_attributes())
for key in ds:
if key not in valid_attrs:
raise AnsibleParserError("'%s' is not a valid attribute for a %s" % (key, self.__class__.__name__), obj=ds)
def validate(self, all_vars=dict()):
''' validation that is done at parse time, not load time '''
# walk all fields in the object
for (name, attribute) in iteritems(self._get_base_attributes()):
# run validator only if present
method = getattr(self, '_validate_%s' % name, None)
if method:
method(attribute, name, getattr(self, name))
def copy(self):
'''
Create a copy of this object and return it.
'''
new_me = self.__class__()
for name in self._get_base_attributes():
setattr(new_me, name, getattr(self, name))
new_me._loader = self._loader
new_me._variable_manager = self._variable_manager
return new_me
def post_validate(self, all_vars=dict(), fail_on_undefined=True):
'''
we can't tell that everything is of the right type until we have
all the variables. Run basic types (from isa) as well as
any _post_validate_<foo> functions.
'''
basedir = None
if self._loader is not None:
basedir = self._loader.get_basedir()
templar = Templar(loader=self._loader, variables=all_vars, fail_on_undefined=fail_on_undefined)
for (name, attribute) in iteritems(self._get_base_attributes()):
if getattr(self, name) is None:
if not attribute.required:
continue
else:
raise AnsibleParserError("the field '%s' is required but was not set" % name)
try:
# if the attribute contains a variable, template it now
value = templar.template(getattr(self, name))
# run the post-validator if present
method = getattr(self, '_post_validate_%s' % name, None)
if method:
value = method(attribute, value, all_vars, fail_on_undefined)
else:
# otherwise, just make sure the attribute is of the type it should be
if attribute.isa == 'string':
value = unicode(value)
elif attribute.isa == 'int':
value = int(value)
elif attribute.isa == 'bool':
value = boolean(value)
elif attribute.isa == 'list':
if not isinstance(value, list):
value = [ value ]
elif attribute.isa == 'dict' and not isinstance(value, dict):
raise TypeError()
# and assign the massaged value back to the attribute field
setattr(self, name, value)
except (TypeError, ValueError) as e:
raise AnsibleParserError("the field '%s' has an invalid value (%s), and could not be converted to an %s. Error was: %s" % (name, value, attribute.isa, e), obj=self.get_ds())
except UndefinedError as e:
if fail_on_undefined:
raise AnsibleParserError("the field '%s' has an invalid value, which appears to include a variable that is undefined. The error was: %s" % (name,e), obj=self.get_ds())
def serialize(self):
'''
Serializes the object derived from the base object into
a dictionary of values. This only serializes the field
attributes for the object, so this may need to be overridden
for any classes which wish to add additional items not stored
as field attributes.
'''
repr = dict()
for name in self._get_base_attributes():
repr[name] = getattr(self, name)
# serialize the uuid field
repr['uuid'] = getattr(self, '_uuid')
return repr
def deserialize(self, data):
'''
Given a dictionary of values, load up the field attributes for
this object. As with serialize(), if there are any non-field
attribute data members, this method will need to be overridden
and extended.
'''
assert isinstance(data, dict)
for (name, attribute) in iteritems(self._get_base_attributes()):
if name in data:
setattr(self, name, data[name])
else:
setattr(self, name, attribute.default)
# restore the UUID field
setattr(self, '_uuid', data.get('uuid'))
def __getstate__(self):
return self.serialize()
def __setstate__(self, data):
self.__init__()
self.deserialize(data)
| gpl-3.0 |
rtucker-mozilla/mozpackager | vendor-local/lib/python/kombu/utils/__init__.py | 2 | 11214 | """
kombu.utils
===========
Internal utilities.
"""
from __future__ import absolute_import
import importlib
import random
import sys
from contextlib import contextmanager
from itertools import count, repeat
from time import sleep
from uuid import UUID, uuid4 as _uuid4, _uuid_generate_random
from .encoding import safe_repr as _safe_repr
try:
import ctypes
except:
ctypes = None # noqa
__all__ = ['EqualityDict', 'say', 'uuid', 'kwdict', 'maybe_list',
'fxrange', 'fxrangemax', 'retry_over_time',
'emergency_dump_state', 'cached_property',
'reprkwargs', 'reprcall', 'nested']
def symbol_by_name(name, aliases={}, imp=None, package=None,
sep='.', default=None, **kwargs):
"""Get symbol by qualified name.
The name should be the full dot-separated path to the class::
modulename.ClassName
Example::
celery.concurrency.processes.TaskPool
^- class name
or using ':' to separate module and symbol::
celery.concurrency.processes:TaskPool
If `aliases` is provided, a dict containing short name/long name
mappings, the name is looked up in the aliases first.
Examples:
>>> symbol_by_name('celery.concurrency.processes.TaskPool')
<class 'celery.concurrency.processes.TaskPool'>
>>> symbol_by_name('default', {
... 'default': 'celery.concurrency.processes.TaskPool'})
<class 'celery.concurrency.processes.TaskPool'>
# Does not try to look up non-string names.
>>> from celery.concurrency.processes import TaskPool
>>> symbol_by_name(TaskPool) is TaskPool
True
"""
if imp is None:
imp = importlib.import_module
if not isinstance(name, basestring):
return name # already a class
name = aliases.get(name) or name
sep = ':' if ':' in name else sep
module_name, _, cls_name = name.rpartition(sep)
if not module_name:
cls_name, module_name = None, package if package else cls_name
try:
try:
module = imp(module_name, package=package, **kwargs)
except ValueError, exc:
raise ValueError, ValueError(
"Couldn't import %r: %s" % (name, exc)), sys.exc_info()[2]
return getattr(module, cls_name) if cls_name else module
except (ImportError, AttributeError):
if default is None:
raise
return default
def eqhash(o):
try:
return o.__eqhash__()
except AttributeError:
return hash(o)
class EqualityDict(dict):
def __getitem__(self, key):
h = eqhash(key)
if h not in self:
return self.__missing__(key)
return dict.__getitem__(self, h)
def __setitem__(self, key, value):
return dict.__setitem__(self, eqhash(key), value)
def __delitem__(self, key):
return dict.__delitem__(self, eqhash(key))
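# Illustrative (not in the original file): EqualityDict keys entries by
# eqhash(key), so two distinct objects whose __eqhash__() values are equal
# share a single slot:
#   d = EqualityDict()
#   d[obj_a] = 1      # stored under eqhash(obj_a)
#   d[obj_b]          # -> 1 whenever eqhash(obj_b) == eqhash(obj_a)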
def say(m, *s):
sys.stderr.write(str(m) % s + '\n')
def uuid4():
# Workaround for http://bugs.python.org/issue4607
if ctypes and _uuid_generate_random: # pragma: no cover
buffer = ctypes.create_string_buffer(16)
_uuid_generate_random(buffer)
return UUID(bytes=buffer.raw)
return _uuid4()
def uuid():
"""Generate a unique id, having - hopefully - a very small chance of
collision.
For now this is provided by :func:`uuid.uuid4`.
"""
return str(uuid4())
gen_unique_id = uuid
if sys.version_info >= (2, 6, 5):
def kwdict(kwargs):
return kwargs
else:
def kwdict(kwargs): # pragma: no cover # noqa
"""Make sure keyword arguments are not in Unicode.
This should be fixed in newer Python versions,
see: http://bugs.python.org/issue4978.
"""
return dict((key.encode('utf-8'), value)
for key, value in kwargs.items())
def maybe_list(v):
if v is None:
return []
if hasattr(v, '__iter__'):
return v
return [v]
def fxrange(start=1.0, stop=None, step=1.0, repeatlast=False):
cur = start * 1.0
while 1:
if not stop or cur <= stop:
yield cur
cur += step
else:
if not repeatlast:
break
yield cur - step
def fxrangemax(start=1.0, stop=None, step=1.0, max=100.0):
sum_, cur = 0, start * 1.0
while 1:
if sum_ >= max:
break
yield cur
if stop:
cur = min(cur + step, stop)
else:
cur += step
sum_ += cur
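# Illustrative (not in the original file):
#   list(fxrange(1.0, 3.0, 1.0))  # -> [1.0, 2.0, 3.0]
# With repeatlast=True the generator keeps yielding the final value after
# `stop` is reached, which is what retry_over_time below relies on for a
# capped backoff interval.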
def retry_over_time(fun, catch, args=[], kwargs={}, errback=None,
max_retries=None, interval_start=2, interval_step=2, interval_max=30,
callback=None):
"""Retry the function over and over until max retries is exceeded.
    For each retry we sleep for a while before we try again; this interval
    is increased for every retry until the max seconds is reached.
:param fun: The function to try
:param catch: Exceptions to catch, can be either tuple or a single
exception class.
:keyword args: Positional arguments passed on to the function.
:keyword kwargs: Keyword arguments passed on to the function.
:keyword errback: Callback for when an exception in ``catch`` is raised.
The callback must take two arguments: ``exc`` and ``interval``, where
``exc`` is the exception instance, and ``interval`` is the time in
        seconds to sleep next.
:keyword max_retries: Maximum number of retries before we give up.
If this is not set, we will retry forever.
:keyword interval_start: How long (in seconds) we start sleeping between
retries.
:keyword interval_step: By how much the interval is increased for each
retry.
:keyword interval_max: Maximum number of seconds to sleep between retries.
"""
retries = 0
interval_range = fxrange(interval_start,
interval_max + interval_start,
interval_step, repeatlast=True)
for retries in count():
try:
return fun(*args, **kwargs)
except catch, exc:
if max_retries is not None and retries > max_retries:
raise
if callback:
callback()
tts = errback(exc, interval_range, retries) if errback else None
if tts:
for i in range(int(tts / interval_step)):
if callback:
callback()
sleep(interval_step)
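# Illustrative usage (the callables are made up for the example):
#   def errback(exc, interval_range, retries):
#       tts = interval_range.next()  # seconds to sleep before the retry
#       say('Retry %r in %ss: %r', retries, tts, exc)
#       return tts
#   retry_over_time(establish_connection, (IOError, OSError),
#                   errback=errback, max_retries=3)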
def emergency_dump_state(state, open_file=open, dump=None):
from pprint import pformat
from tempfile import mktemp
if dump is None:
import pickle
dump = pickle.dump
persist = mktemp()
say('EMERGENCY DUMP STATE TO FILE -> %s <-' % persist)
fh = open_file(persist, 'w')
try:
try:
dump(state, fh, protocol=0)
except Exception, exc:
say('Cannot pickle state: %r. Fallback to pformat.' % (exc, ))
fh.write(pformat(state))
finally:
fh.flush()
fh.close()
return persist
class cached_property(object):
"""Property descriptor that caches the return value
of the get function.
*Examples*
.. code-block:: python
@cached_property
def connection(self):
return Connection()
@connection.setter # Prepares stored value
def connection(self, value):
if value is None:
raise TypeError('Connection must be a connection')
return value
@connection.deleter
def connection(self, value):
# Additional action to do at del(self.attr)
if value is not None:
print('Connection %r deleted' % (value, ))
"""
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.__get = fget
self.__set = fset
self.__del = fdel
self.__doc__ = doc or fget.__doc__
self.__name__ = fget.__name__
self.__module__ = fget.__module__
def __get__(self, obj, type=None):
if obj is None:
return self
try:
return obj.__dict__[self.__name__]
except KeyError:
value = obj.__dict__[self.__name__] = self.__get(obj)
return value
def __set__(self, obj, value):
if obj is None:
return self
if self.__set is not None:
value = self.__set(obj, value)
obj.__dict__[self.__name__] = value
def __delete__(self, obj):
if obj is None:
return self
try:
value = obj.__dict__.pop(self.__name__)
except KeyError:
pass
else:
if self.__del is not None:
self.__del(obj, value)
def setter(self, fset):
return self.__class__(self.__get, fset, self.__del)
def deleter(self, fdel):
return self.__class__(self.__get, self.__set, fdel)
def reprkwargs(kwargs, sep=', ', fmt='%s=%s'):
return sep.join(fmt % (k, _safe_repr(v)) for k, v in kwargs.iteritems())
def reprcall(name, args=(), kwargs={}, sep=', '):
return '%s(%s%s%s)' % (name, sep.join(map(_safe_repr, args or ())),
(args and kwargs) and sep or '',
reprkwargs(kwargs, sep))
@contextmanager
def nested(*managers): # pragma: no cover
"""Combine multiple context managers into a single nested
context manager."""
exits = []
vars = []
exc = (None, None, None)
try:
try:
for mgr in managers:
exit = mgr.__exit__
enter = mgr.__enter__
vars.append(enter())
exits.append(exit)
yield vars
except:
exc = sys.exc_info()
finally:
while exits:
exit = exits.pop()
try:
if exit(*exc):
exc = (None, None, None)
except:
exc = sys.exc_info()
if exc != (None, None, None):
# Don't rely on sys.exc_info() still containing
# the right information. Another exception may
# have been raised and caught by an exit method
raise exc[0], exc[1], exc[2]
finally:
del(exc)
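# Illustrative (not in the original file): on Python versions without
# multi-context `with` syntax,
#   with nested(lock_a, lock_b) as (a, b):
#       ...
# behaves like two stacked `with` statements over lock_a and lock_b.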
def shufflecycle(it):
it = list(it) # don't modify callers list
shuffle = random.shuffle
for _ in repeat(None):
shuffle(it)
yield it[0]
def entrypoints(namespace):
try:
from pkg_resources import iter_entry_points
except ImportError:
return iter([])
return ((ep, ep.load()) for ep in iter_entry_points(namespace))
class ChannelPromise(object):
def __init__(self, contract):
self.__contract__ = contract
def __call__(self):
try:
return self.__value__
except AttributeError:
value = self.__value__ = self.__contract__()
return value
def __repr__(self):
return '<promise: %r>' % (self(), )
| bsd-3-clause |
usersource/tasks | tasks_phonegap/Tasks/cordova/plugins/io.usersource.anno/tools/copytool3/apiclient/http.py | 102 | 52847 | # Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to encapsulate a single HTTP request.
The classes implement a command pattern, with every
object supporting an execute() method that does the
actual HTTP request.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import StringIO
import base64
import copy
import gzip
import httplib2
import logging
import mimeparse
import mimetypes
import os
import random
import sys
import time
import urllib
import urlparse
import uuid
from email.generator import Generator
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
from email.parser import FeedParser
from errors import BatchError
from errors import HttpError
from errors import InvalidChunkSizeError
from errors import ResumableUploadError
from errors import UnexpectedBodyError
from errors import UnexpectedMethodError
from model import JsonModel
from oauth2client import util
from oauth2client.anyjson import simplejson
DEFAULT_CHUNK_SIZE = 512*1024
MAX_URI_LENGTH = 2048
class MediaUploadProgress(object):
"""Status of a resumable upload."""
def __init__(self, resumable_progress, total_size):
"""Constructor.
Args:
resumable_progress: int, bytes sent so far.
total_size: int, total bytes in complete upload, or None if the total
upload size isn't known ahead of time.
"""
self.resumable_progress = resumable_progress
self.total_size = total_size
def progress(self):
"""Percent of upload completed, as a float.
Returns:
the percentage complete as a float, returning 0.0 if the total size of
the upload is unknown.
"""
if self.total_size is not None:
return float(self.resumable_progress) / float(self.total_size)
else:
return 0.0
class MediaDownloadProgress(object):
"""Status of a resumable download."""
def __init__(self, resumable_progress, total_size):
"""Constructor.
Args:
resumable_progress: int, bytes received so far.
total_size: int, total bytes in complete download.
"""
self.resumable_progress = resumable_progress
self.total_size = total_size
def progress(self):
"""Percent of download completed, as a float.
Returns:
the percentage complete as a float, returning 0.0 if the total size of
the download is unknown.
"""
if self.total_size is not None:
return float(self.resumable_progress) / float(self.total_size)
else:
return 0.0
class MediaUpload(object):
"""Describes a media object to upload.
Base class that defines the interface of MediaUpload subclasses.
Note that subclasses of MediaUpload may allow you to control the chunksize
when uploading a media object. It is important to keep the size of the chunk
as large as possible to keep the upload efficient. Other factors may influence
the size of the chunk you use, particularly if you are working in an
environment where individual HTTP requests may have a hardcoded time limit,
such as under certain classes of requests under Google App Engine.
Streams are io.Base compatible objects that support seek(). Some MediaUpload
  subclasses support using streams directly to upload data. A MediaUpload
  sub-class indicates support for streaming by having has_stream() return
  True; where the platform permits, the stream is then used to upload the
  media object. The stream() method
should return an io.Base object that supports seek(). On platforms where the
underlying httplib module supports streaming, for example Python 2.6 and
later, the stream will be passed into the http library which will result in
less memory being used and possibly faster uploads.
If you need to upload media that can't be uploaded using any of the existing
MediaUpload sub-class then you can sub-class MediaUpload for your particular
needs.
"""
def chunksize(self):
"""Chunk size for resumable uploads.
Returns:
Chunk size in bytes.
"""
raise NotImplementedError()
def mimetype(self):
"""Mime type of the body.
Returns:
Mime type.
"""
return 'application/octet-stream'
def size(self):
"""Size of upload.
Returns:
Size of the body, or None of the size is unknown.
"""
return None
def resumable(self):
"""Whether this upload is resumable.
Returns:
True if resumable upload or False.
"""
return False
  def getbytes(self, begin, length):
"""Get bytes from the media.
Args:
begin: int, offset from beginning of file.
length: int, number of bytes to read, starting at begin.
Returns:
A string of bytes read. May be shorter than length if EOF was reached
first.
"""
raise NotImplementedError()
def has_stream(self):
"""Does the underlying upload support a streaming interface.
Streaming means it is an io.IOBase subclass that supports seek, i.e.
seekable() returns True.
Returns:
True if the call to stream() will return an instance of a seekable io.Base
subclass.
"""
return False
def stream(self):
"""A stream interface to the data being uploaded.
Returns:
The returned value is an io.IOBase subclass that supports seek, i.e.
seekable() returns True.
"""
raise NotImplementedError()
@util.positional(1)
def _to_json(self, strip=None):
"""Utility function for creating a JSON representation of a MediaUpload.
Args:
strip: array, An array of names of members to not include in the JSON.
Returns:
string, a JSON representation of this instance, suitable to pass to
from_json().
"""
t = type(self)
d = copy.copy(self.__dict__)
if strip is not None:
for member in strip:
del d[member]
d['_class'] = t.__name__
d['_module'] = t.__module__
return simplejson.dumps(d)
def to_json(self):
"""Create a JSON representation of an instance of MediaUpload.
Returns:
string, a JSON representation of this instance, suitable to pass to
from_json().
"""
return self._to_json()
@classmethod
def new_from_json(cls, s):
"""Utility class method to instantiate a MediaUpload subclass from a JSON
representation produced by to_json().
Args:
s: string, JSON from to_json().
Returns:
An instance of the subclass of MediaUpload that was serialized with
to_json().
"""
data = simplejson.loads(s)
# Find and call the right classmethod from_json() to restore the object.
module = data['_module']
m = __import__(module, fromlist=module.split('.')[:-1])
kls = getattr(m, data['_class'])
from_json = getattr(kls, 'from_json')
return from_json(s)
class MediaIoBaseUpload(MediaUpload):
"""A MediaUpload for a io.Base objects.
Note that the Python file object is compatible with io.Base and can be used
with this class also.
fh = io.BytesIO('...Some data to upload...')
media = MediaIoBaseUpload(fh, mimetype='image/png',
chunksize=1024*1024, resumable=True)
farm.animals().insert(
id='cow',
name='cow.png',
media_body=media).execute()
Depending on the platform you are working on, you may pass -1 as the
chunksize, which indicates that the entire file should be uploaded in a single
request. If the underlying platform supports streams, such as Python 2.6 or
later, then this can be very efficient as it avoids multiple connections, and
also avoids loading the entire file into memory before sending it. Note that
Google App Engine has a 5MB limit on request size, so you should never set
your chunksize larger than 5MB, or to -1.
"""
@util.positional(3)
def __init__(self, fd, mimetype, chunksize=DEFAULT_CHUNK_SIZE,
resumable=False):
"""Constructor.
Args:
fd: io.Base or file object, The source of the bytes to upload. MUST be
opened in blocking mode, do not use streams opened in non-blocking mode.
The given stream must be seekable, that is, it must be able to call
seek() on fd.
mimetype: string, Mime-type of the file.
chunksize: int, File will be uploaded in chunks of this many bytes. Only
used if resumable=True. Pass in a value of -1 if the file is to be
uploaded as a single chunk. Note that Google App Engine has a 5MB limit
on request size, so you should never set your chunksize larger than 5MB,
or to -1.
resumable: bool, True if this is a resumable upload. False means upload
in a single request.
"""
super(MediaIoBaseUpload, self).__init__()
self._fd = fd
self._mimetype = mimetype
if not (chunksize == -1 or chunksize > 0):
raise InvalidChunkSizeError()
self._chunksize = chunksize
self._resumable = resumable
self._fd.seek(0, os.SEEK_END)
self._size = self._fd.tell()
def chunksize(self):
"""Chunk size for resumable uploads.
Returns:
Chunk size in bytes.
"""
return self._chunksize
def mimetype(self):
"""Mime type of the body.
Returns:
Mime type.
"""
return self._mimetype
def size(self):
"""Size of upload.
Returns:
Size of the body, or None of the size is unknown.
"""
return self._size
def resumable(self):
"""Whether this upload is resumable.
Returns:
True if resumable upload or False.
"""
return self._resumable
def getbytes(self, begin, length):
"""Get bytes from the media.
Args:
begin: int, offset from beginning of file.
length: int, number of bytes to read, starting at begin.
Returns:
      A string of bytes read. May be shorter than length if EOF was reached
first.
"""
self._fd.seek(begin)
return self._fd.read(length)
def has_stream(self):
"""Does the underlying upload support a streaming interface.
Streaming means it is an io.IOBase subclass that supports seek, i.e.
seekable() returns True.
Returns:
True if the call to stream() will return an instance of a seekable io.Base
subclass.
"""
return True
def stream(self):
"""A stream interface to the data being uploaded.
Returns:
The returned value is an io.IOBase subclass that supports seek, i.e.
seekable() returns True.
"""
return self._fd
def to_json(self):
"""This upload type is not serializable."""
raise NotImplementedError('MediaIoBaseUpload is not serializable.')
class MediaFileUpload(MediaIoBaseUpload):
"""A MediaUpload for a file.
Construct a MediaFileUpload and pass as the media_body parameter of the
method. For example, if we had a service that allowed uploading images:
media = MediaFileUpload('cow.png', mimetype='image/png',
chunksize=1024*1024, resumable=True)
farm.animals().insert(
id='cow',
name='cow.png',
media_body=media).execute()
Depending on the platform you are working on, you may pass -1 as the
chunksize, which indicates that the entire file should be uploaded in a single
request. If the underlying platform supports streams, such as Python 2.6 or
later, then this can be very efficient as it avoids multiple connections, and
also avoids loading the entire file into memory before sending it. Note that
Google App Engine has a 5MB limit on request size, so you should never set
your chunksize larger than 5MB, or to -1.
"""
@util.positional(2)
def __init__(self, filename, mimetype=None, chunksize=DEFAULT_CHUNK_SIZE,
resumable=False):
"""Constructor.
Args:
filename: string, Name of the file.
mimetype: string, Mime-type of the file. If None then a mime-type will be
guessed from the file extension.
chunksize: int, File will be uploaded in chunks of this many bytes. Only
used if resumable=True. Pass in a value of -1 if the file is to be
uploaded in a single chunk. Note that Google App Engine has a 5MB limit
on request size, so you should never set your chunksize larger than 5MB,
or to -1.
resumable: bool, True if this is a resumable upload. False means upload
in a single request.
"""
self._filename = filename
fd = open(self._filename, 'rb')
if mimetype is None:
(mimetype, encoding) = mimetypes.guess_type(filename)
super(MediaFileUpload, self).__init__(fd, mimetype, chunksize=chunksize,
resumable=resumable)
def to_json(self):
"""Creating a JSON representation of an instance of MediaFileUpload.
Returns:
string, a JSON representation of this instance, suitable to pass to
from_json().
"""
return self._to_json(strip=['_fd'])
@staticmethod
def from_json(s):
d = simplejson.loads(s)
return MediaFileUpload(d['_filename'], mimetype=d['_mimetype'],
chunksize=d['_chunksize'], resumable=d['_resumable'])
class MediaInMemoryUpload(MediaIoBaseUpload):
"""MediaUpload for a chunk of bytes.
DEPRECATED: Use MediaIoBaseUpload with either io.TextIOBase or StringIO for
the stream.
"""
@util.positional(2)
def __init__(self, body, mimetype='application/octet-stream',
chunksize=DEFAULT_CHUNK_SIZE, resumable=False):
"""Create a new MediaInMemoryUpload.
DEPRECATED: Use MediaIoBaseUpload with either io.TextIOBase or StringIO for
the stream.
Args:
body: string, Bytes of body content.
mimetype: string, Mime-type of the file or default of
'application/octet-stream'.
chunksize: int, File will be uploaded in chunks of this many bytes. Only
used if resumable=True.
resumable: bool, True if this is a resumable upload. False means upload
in a single request.
"""
fd = StringIO.StringIO(body)
super(MediaInMemoryUpload, self).__init__(fd, mimetype, chunksize=chunksize,
resumable=resumable)
class MediaIoBaseDownload(object):
""""Download media resources.
Note that the Python file object is compatible with io.Base and can be used
with this class also.
Example:
request = farms.animals().get_media(id='cow')
fh = io.FileIO('cow.png', mode='wb')
downloader = MediaIoBaseDownload(fh, request, chunksize=1024*1024)
done = False
while done is False:
status, done = downloader.next_chunk()
if status:
print "Download %d%%." % int(status.progress() * 100)
print "Download Complete!"
"""
@util.positional(3)
def __init__(self, fd, request, chunksize=DEFAULT_CHUNK_SIZE):
"""Constructor.
Args:
fd: io.Base or file object, The stream in which to write the downloaded
bytes.
request: apiclient.http.HttpRequest, the media request to perform in
chunks.
chunksize: int, File will be downloaded in chunks of this many bytes.
"""
self._fd = fd
self._request = request
self._uri = request.uri
self._chunksize = chunksize
self._progress = 0
self._total_size = None
self._done = False
# Stubs for testing.
self._sleep = time.sleep
self._rand = random.random
@util.positional(1)
def next_chunk(self, num_retries=0):
"""Get the next chunk of the download.
Args:
num_retries: Integer, number of times to retry 500's with randomized
exponential backoff. If all retries fail, the raised HttpError
represents the last request. If zero (default), we attempt the
request only once.
Returns:
(status, done): (MediaDownloadStatus, boolean)
The value of 'done' will be True when the media has been fully
downloaded.
Raises:
apiclient.errors.HttpError if the response was not a 2xx.
      httplib2.HttpLib2Error if a transport error has occurred.
"""
headers = {
'range': 'bytes=%d-%d' % (
self._progress, self._progress + self._chunksize)
}
http = self._request.http
for retry_num in xrange(num_retries + 1):
if retry_num > 0:
self._sleep(self._rand() * 2**retry_num)
logging.warning(
'Retry #%d for media download: GET %s, following status: %d'
% (retry_num, self._uri, resp.status))
resp, content = http.request(self._uri, headers=headers)
if resp.status < 500:
break
if resp.status in [200, 206]:
if 'content-location' in resp and resp['content-location'] != self._uri:
self._uri = resp['content-location']
self._progress += len(content)
self._fd.write(content)
if 'content-range' in resp:
content_range = resp['content-range']
length = content_range.rsplit('/', 1)[1]
self._total_size = int(length)
if self._progress == self._total_size:
self._done = True
return MediaDownloadProgress(self._progress, self._total_size), self._done
else:
raise HttpError(resp, content, uri=self._uri)
class _StreamSlice(object):
"""Truncated stream.
Takes a stream and presents a stream that is a slice of the original stream.
This is used when uploading media in chunks. In later versions of Python a
stream can be passed to httplib in place of the string of data to send. The
problem is that httplib just blindly reads to the end of the stream. This
wrapper presents a virtual stream that only reads to the end of the chunk.
"""
def __init__(self, stream, begin, chunksize):
"""Constructor.
Args:
stream: (io.Base, file object), the stream to wrap.
begin: int, the seek position the chunk begins at.
chunksize: int, the size of the chunk.
"""
self._stream = stream
self._begin = begin
self._chunksize = chunksize
self._stream.seek(begin)
def read(self, n=-1):
"""Read n bytes.
Args:
n, int, the number of bytes to read.
Returns:
A string of length 'n', or less if EOF is reached.
"""
# The data left available to read sits in [cur, end)
cur = self._stream.tell()
end = self._begin + self._chunksize
if n == -1 or cur + n > end:
n = end - cur
return self._stream.read(n)
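  # Illustrative (not in the original file): for a 100-byte stream,
  # _StreamSlice(fh, begin=10, chunksize=20) exposes only bytes [10, 30);
  # read(-1) returns at most 20 bytes even though the stream is longer.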
class HttpRequest(object):
"""Encapsulates a single HTTP request."""
@util.positional(4)
def __init__(self, http, postproc, uri,
method='GET',
body=None,
headers=None,
methodId=None,
resumable=None):
"""Constructor for an HttpRequest.
Args:
http: httplib2.Http, the transport object to use to make a request
postproc: callable, called on the HTTP response and content to transform
it into a data object before returning, or raising an exception
on an error.
uri: string, the absolute URI to send the request to
method: string, the HTTP method to use
body: string, the request body of the HTTP request,
headers: dict, the HTTP request headers
methodId: string, a unique identifier for the API method being called.
      resumable: MediaUpload, None if this is not a resumable request.
"""
self.uri = uri
self.method = method
self.body = body
self.headers = headers or {}
self.methodId = methodId
self.http = http
self.postproc = postproc
self.resumable = resumable
self.response_callbacks = []
self._in_error_state = False
# Pull the multipart boundary out of the content-type header.
major, minor, params = mimeparse.parse_mime_type(
headers.get('content-type', 'application/json'))
# The size of the non-media part of the request.
self.body_size = len(self.body or '')
# The resumable URI to send chunks to.
self.resumable_uri = None
# The bytes that have been uploaded.
self.resumable_progress = 0
# Stubs for testing.
self._rand = random.random
self._sleep = time.sleep
@util.positional(1)
def execute(self, http=None, num_retries=0):
"""Execute the request.
Args:
http: httplib2.Http, an http object to be used in place of the
one the HttpRequest request object was constructed with.
num_retries: Integer, number of times to retry 500's with randomized
exponential backoff. If all retries fail, the raised HttpError
represents the last request. If zero (default), we attempt the
request only once.
Returns:
A deserialized object model of the response body as determined
by the postproc.
Raises:
apiclient.errors.HttpError if the response was not a 2xx.
      httplib2.HttpLib2Error if a transport error has occurred.
"""
if http is None:
http = self.http
if self.resumable:
body = None
while body is None:
_, body = self.next_chunk(http=http, num_retries=num_retries)
return body
# Non-resumable case.
if 'content-length' not in self.headers:
self.headers['content-length'] = str(self.body_size)
# If the request URI is too long then turn it into a POST request.
if len(self.uri) > MAX_URI_LENGTH and self.method == 'GET':
self.method = 'POST'
self.headers['x-http-method-override'] = 'GET'
self.headers['content-type'] = 'application/x-www-form-urlencoded'
parsed = urlparse.urlparse(self.uri)
self.uri = urlparse.urlunparse(
(parsed.scheme, parsed.netloc, parsed.path, parsed.params, None,
None)
)
self.body = parsed.query
self.headers['content-length'] = str(len(self.body))
# Handle retries for server-side errors.
for retry_num in xrange(num_retries + 1):
if retry_num > 0:
self._sleep(self._rand() * 2**retry_num)
logging.warning('Retry #%d for request: %s %s, following status: %d'
% (retry_num, self.method, self.uri, resp.status))
resp, content = http.request(str(self.uri), method=str(self.method),
body=self.body, headers=self.headers)
if resp.status < 500:
break
for callback in self.response_callbacks:
callback(resp)
if resp.status >= 300:
raise HttpError(resp, content, uri=self.uri)
return self.postproc(resp, content)
@util.positional(2)
def add_response_callback(self, cb):
"""add_response_headers_callback
Args:
cb: Callback to be called on receiving the response headers, of signature:
def cb(resp):
# Where resp is an instance of httplib2.Response
"""
self.response_callbacks.append(cb)
@util.positional(1)
def next_chunk(self, http=None, num_retries=0):
"""Execute the next step of a resumable upload.
Can only be used if the method being executed supports media uploads and
the MediaUpload object passed in was flagged as using resumable upload.
Example:
media = MediaFileUpload('cow.png', mimetype='image/png',
chunksize=1000, resumable=True)
request = farm.animals().insert(
id='cow',
name='cow.png',
media_body=media)
response = None
while response is None:
status, response = request.next_chunk()
if status:
print "Upload %d%% complete." % int(status.progress() * 100)
Args:
http: httplib2.Http, an http object to be used in place of the
one the HttpRequest request object was constructed with.
num_retries: Integer, number of times to retry 500's with randomized
exponential backoff. If all retries fail, the raised HttpError
represents the last request. If zero (default), we attempt the
request only once.
Returns:
(status, body): (ResumableMediaStatus, object)
The body will be None until the resumable media is fully uploaded.
Raises:
apiclient.errors.HttpError if the response was not a 2xx.
      httplib2.HttpLib2Error if a transport error has occurred.
"""
if http is None:
http = self.http
if self.resumable.size() is None:
size = '*'
else:
size = str(self.resumable.size())
if self.resumable_uri is None:
start_headers = copy.copy(self.headers)
start_headers['X-Upload-Content-Type'] = self.resumable.mimetype()
if size != '*':
start_headers['X-Upload-Content-Length'] = size
start_headers['content-length'] = str(self.body_size)
for retry_num in xrange(num_retries + 1):
if retry_num > 0:
self._sleep(self._rand() * 2**retry_num)
logging.warning(
'Retry #%d for resumable URI request: %s %s, following status: %d'
% (retry_num, self.method, self.uri, resp.status))
resp, content = http.request(self.uri, method=self.method,
body=self.body,
headers=start_headers)
if resp.status < 500:
break
if resp.status == 200 and 'location' in resp:
self.resumable_uri = resp['location']
else:
raise ResumableUploadError(resp, content)
elif self._in_error_state:
# If we are in an error state then query the server for current state of
# the upload by sending an empty PUT and reading the 'range' header in
# the response.
headers = {
'Content-Range': 'bytes */%s' % size,
'content-length': '0'
}
resp, content = http.request(self.resumable_uri, 'PUT',
headers=headers)
status, body = self._process_response(resp, content)
if body:
# The upload was complete.
return (status, body)
# The httplib.request method can take streams for the body parameter, but
# only in Python 2.6 or later. If a stream is available under those
# conditions then use it as the body argument.
if self.resumable.has_stream() and sys.version_info[1] >= 6:
data = self.resumable.stream()
if self.resumable.chunksize() == -1:
data.seek(self.resumable_progress)
chunk_end = self.resumable.size() - self.resumable_progress - 1
else:
# Doing chunking with a stream, so wrap a slice of the stream.
data = _StreamSlice(data, self.resumable_progress,
self.resumable.chunksize())
chunk_end = min(
self.resumable_progress + self.resumable.chunksize() - 1,
self.resumable.size() - 1)
else:
data = self.resumable.getbytes(
self.resumable_progress, self.resumable.chunksize())
# A short read implies that we are at EOF, so finish the upload.
if len(data) < self.resumable.chunksize():
size = str(self.resumable_progress + len(data))
chunk_end = self.resumable_progress + len(data) - 1
headers = {
'Content-Range': 'bytes %d-%d/%s' % (
self.resumable_progress, chunk_end, size),
# Must set the content-length header here because httplib can't
# calculate the size when working with _StreamSlice.
'Content-Length': str(chunk_end - self.resumable_progress + 1)
}
for retry_num in xrange(num_retries + 1):
if retry_num > 0:
self._sleep(self._rand() * 2**retry_num)
logging.warning(
'Retry #%d for media upload: %s %s, following status: %d'
% (retry_num, self.method, self.uri, resp.status))
try:
resp, content = http.request(self.resumable_uri, method='PUT',
body=data,
headers=headers)
except:
self._in_error_state = True
raise
if resp.status < 500:
break
return self._process_response(resp, content)
def _process_response(self, resp, content):
"""Process the response from a single chunk upload.
Args:
resp: httplib2.Response, the response object.
content: string, the content of the response.
Returns:
(status, body): (ResumableMediaStatus, object)
The body will be None until the resumable media is fully uploaded.
Raises:
apiclient.errors.HttpError if the response was not a 2xx or a 308.
"""
if resp.status in [200, 201]:
self._in_error_state = False
return None, self.postproc(resp, content)
elif resp.status == 308:
self._in_error_state = False
# A "308 Resume Incomplete" indicates we are not done.
self.resumable_progress = int(resp['range'].split('-')[1]) + 1
if 'location' in resp:
self.resumable_uri = resp['location']
else:
self._in_error_state = True
raise HttpError(resp, content, uri=self.uri)
return (MediaUploadProgress(self.resumable_progress, self.resumable.size()),
None)
def to_json(self):
"""Returns a JSON representation of the HttpRequest."""
d = copy.copy(self.__dict__)
if d['resumable'] is not None:
d['resumable'] = self.resumable.to_json()
del d['http']
del d['postproc']
del d['_sleep']
del d['_rand']
return simplejson.dumps(d)
@staticmethod
def from_json(s, http, postproc):
"""Returns an HttpRequest populated with info from a JSON object."""
d = simplejson.loads(s)
if d['resumable'] is not None:
d['resumable'] = MediaUpload.new_from_json(d['resumable'])
return HttpRequest(
http,
postproc,
uri=d['uri'],
method=d['method'],
body=d['body'],
headers=d['headers'],
methodId=d['methodId'],
resumable=d['resumable'])
class BatchHttpRequest(object):
"""Batches multiple HttpRequest objects into a single HTTP request.
Example:
from apiclient.http import BatchHttpRequest
def list_animals(request_id, response, exception):
\"\"\"Do something with the animals list response.\"\"\"
if exception is not None:
# Do something with the exception.
pass
else:
# Do something with the response.
pass
def list_farmers(request_id, response, exception):
\"\"\"Do something with the farmers list response.\"\"\"
if exception is not None:
# Do something with the exception.
pass
else:
# Do something with the response.
pass
service = build('farm', 'v2')
batch = BatchHttpRequest()
batch.add(service.animals().list(), list_animals)
batch.add(service.farmers().list(), list_farmers)
batch.execute(http=http)
"""
@util.positional(1)
def __init__(self, callback=None, batch_uri=None):
"""Constructor for a BatchHttpRequest.
Args:
callback: callable, A callback to be called for each response, of the
form callback(id, response, exception). The first parameter is the
request id, and the second is the deserialized response object. The
third is an apiclient.errors.HttpError exception object if an HTTP error
occurred while processing the request, or None if no error occurred.
batch_uri: string, URI to send batch requests to.
"""
if batch_uri is None:
batch_uri = 'https://www.googleapis.com/batch'
self._batch_uri = batch_uri
# Global callback to be called for each individual response in the batch.
self._callback = callback
# A map from id to request.
self._requests = {}
# A map from id to callback.
self._callbacks = {}
# List of request ids, in the order in which they were added.
self._order = []
# The last auto generated id.
self._last_auto_id = 0
# Unique ID on which to base the Content-ID headers.
self._base_id = None
# A map from request id to (httplib2.Response, content) response pairs
self._responses = {}
# A map of id(Credentials) that have been refreshed.
self._refreshed_credentials = {}
def _refresh_and_apply_credentials(self, request, http):
"""Refresh the credentials and apply to the request.
Args:
request: HttpRequest, the request.
http: httplib2.Http, the global http object for the batch.
"""
# For the credentials to refresh, but only once per refresh_token
# If there is no http per the request then refresh the http passed in
# via execute()
creds = None
if request.http is not None and hasattr(request.http.request,
'credentials'):
creds = request.http.request.credentials
elif http is not None and hasattr(http.request, 'credentials'):
creds = http.request.credentials
if creds is not None:
if id(creds) not in self._refreshed_credentials:
creds.refresh(http)
self._refreshed_credentials[id(creds)] = 1
# Only apply the credentials if we are using the http object passed in,
# otherwise apply() will get called during _serialize_request().
if request.http is None or not hasattr(request.http.request,
'credentials'):
creds.apply(request.headers)
def _id_to_header(self, id_):
"""Convert an id to a Content-ID header value.
Args:
id_: string, identifier of individual request.
Returns:
A Content-ID header with the id_ encoded into it. A UUID is prepended to
the value because Content-ID headers are supposed to be universally
unique.
"""
if self._base_id is None:
self._base_id = uuid.uuid4()
return '<%s+%s>' % (self._base_id, urllib.quote(id_))
def _header_to_id(self, header):
"""Convert a Content-ID header value to an id.
Presumes the Content-ID header conforms to the format that _id_to_header()
returns.
Args:
header: string, Content-ID header value.
Returns:
The extracted id value.
Raises:
BatchError if the header is not in the expected format.
"""
if header[0] != '<' or header[-1] != '>':
raise BatchError("Invalid value for Content-ID: %s" % header)
if '+' not in header:
raise BatchError("Invalid value for Content-ID: %s" % header)
base, id_ = header[1:-1].rsplit('+', 1)
return urllib.unquote(id_)
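  # Illustrative round trip (the UUID prefix is made up):
  #   self._id_to_header('1')              # -> '<b8f25a...+1>'
  #   self._header_to_id('<b8f25a...+1>')  # -> '1'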
def _serialize_request(self, request):
"""Convert an HttpRequest object into a string.
Args:
request: HttpRequest, the request to serialize.
Returns:
The request as a string in application/http format.
"""
# Construct status line
parsed = urlparse.urlparse(request.uri)
request_line = urlparse.urlunparse(
(None, None, parsed.path, parsed.params, parsed.query, None)
)
status_line = request.method + ' ' + request_line + ' HTTP/1.1\n'
major, minor = request.headers.get('content-type', 'application/json').split('/')
msg = MIMENonMultipart(major, minor)
headers = request.headers.copy()
if request.http is not None and hasattr(request.http.request,
'credentials'):
request.http.request.credentials.apply(headers)
# MIMENonMultipart adds its own Content-Type header.
if 'content-type' in headers:
del headers['content-type']
for key, value in headers.iteritems():
msg[key] = value
msg['Host'] = parsed.netloc
msg.set_unixfrom(None)
if request.body is not None:
msg.set_payload(request.body)
msg['content-length'] = str(len(request.body))
# Serialize the mime message.
fp = StringIO.StringIO()
# maxheaderlen=0 means don't line wrap headers.
g = Generator(fp, maxheaderlen=0)
g.flatten(msg, unixfrom=False)
body = fp.getvalue()
# Strip off the \n\n that the MIME lib tacks onto the end of an empty payload.
if request.body is None:
body = body[:-2]
return status_line.encode('utf-8') + body
def _deserialize_response(self, payload):
"""Convert string into httplib2 response and content.
Args:
payload: string, headers and body as a string.
Returns:
A pair (resp, content), such as would be returned from httplib2.request.
"""
# Strip off the status line
status_line, payload = payload.split('\n', 1)
protocol, status, reason = status_line.split(' ', 2)
# Parse the rest of the response
parser = FeedParser()
parser.feed(payload)
msg = parser.close()
msg['status'] = status
# Create httplib2.Response from the parsed headers.
resp = httplib2.Response(msg)
resp.reason = reason
resp.version = int(protocol.split('/', 1)[1].replace('.', ''))
content = payload.split('\r\n\r\n', 1)[1]
return resp, content
def _new_id(self):
"""Create a new id.
Auto incrementing number that avoids conflicts with ids already used.
Returns:
string, a new unique id.
"""
self._last_auto_id += 1
while str(self._last_auto_id) in self._requests:
self._last_auto_id += 1
return str(self._last_auto_id)
@util.positional(2)
def add(self, request, callback=None, request_id=None):
"""Add a new request.
Every callback added will be paired with a unique id, the request_id. That
unique id will be passed back to the callback when the response comes back
from the server. The default behavior is to have the library generate its
own unique id. If the caller passes in a request_id then they must ensure
uniqueness for each request_id; if a duplicate is supplied, an exception is
raised. Callers should either supply all request_ids or never supply a
request id, to avoid such an error.
Args:
request: HttpRequest, Request to add to the batch.
callback: callable, A callback to be called for this response, of the
form callback(id, response, exception). The first parameter is the
request id, and the second is the deserialized response object. The
third is an apiclient.errors.HttpError exception object if an HTTP error
occurred while processing the request, or None if no errors occurred.
request_id: string, A unique id for the request. The id will be passed to
the callback with the response.
Returns:
None
Raises:
BatchError if a media request is added to a batch.
KeyError if the request_id is not unique.
"""
if request_id is None:
request_id = self._new_id()
if request.resumable is not None:
raise BatchError("Media requests cannot be used in a batch request.")
if request_id in self._requests:
raise KeyError("A request with this ID already exists: %s" % request_id)
self._requests[request_id] = request
self._callbacks[request_id] = callback
self._order.append(request_id)
def _execute(self, http, order, requests):
"""Serialize batch request, send to server, process response.
Args:
http: httplib2.Http, an http object to be used to make the request with.
order: list, list of request ids in the order they were added to the
batch.
requests: list, list of request objects to send.
Raises:
httplib2.HttpLib2Error if a transport error has occurred.
apiclient.errors.BatchError if the response is the wrong format.
"""
message = MIMEMultipart('mixed')
# Message should not write out its own headers.
setattr(message, '_write_headers', lambda self: None)
# Add all the individual requests.
for request_id in order:
request = requests[request_id]
msg = MIMENonMultipart('application', 'http')
msg['Content-Transfer-Encoding'] = 'binary'
msg['Content-ID'] = self._id_to_header(request_id)
body = self._serialize_request(request)
msg.set_payload(body)
message.attach(msg)
body = message.as_string()
headers = {}
headers['content-type'] = ('multipart/mixed; '
'boundary="%s"') % message.get_boundary()
resp, content = http.request(self._batch_uri, method='POST', body=body,
headers=headers)
if resp.status >= 300:
raise HttpError(resp, content, uri=self._batch_uri)
# Now break out the individual responses and store each one.
boundary, _ = content.split(None, 1)
# Prepend with a content-type header so FeedParser can handle it.
header = 'content-type: %s\r\n\r\n' % resp['content-type']
for_parser = header + content
parser = FeedParser()
parser.feed(for_parser)
mime_response = parser.close()
if not mime_response.is_multipart():
raise BatchError("Response not in multipart/mixed format.", resp=resp,
content=content)
for part in mime_response.get_payload():
request_id = self._header_to_id(part['Content-ID'])
response, content = self._deserialize_response(part.get_payload())
self._responses[request_id] = (response, content)
@util.positional(1)
def execute(self, http=None):
"""Execute all the requests as a single batched HTTP request.
Args:
http: httplib2.Http, an http object to be used in place of the one the
HttpRequest request object was constructed with. If one isn't supplied
then an http object from one of the requests in this batch is used.
Returns:
None
Raises:
httplib2.HttpLib2Error if a transport error has occurred.
apiclient.errors.BatchError if the response is the wrong format.
"""
# If http is not supplied use the first valid one given in the requests.
if http is None:
for request_id in self._order:
request = self._requests[request_id]
if request is not None:
http = request.http
break
if http is None:
raise ValueError("Missing a valid http object.")
self._execute(http, self._order, self._requests)
# Loop over all the requests and check for 401s. For each 401 request the
# credentials should be refreshed and then sent again in a separate batch.
redo_requests = {}
redo_order = []
for request_id in self._order:
resp, content = self._responses[request_id]
if resp['status'] == '401':
redo_order.append(request_id)
request = self._requests[request_id]
self._refresh_and_apply_credentials(request, http)
redo_requests[request_id] = request
if redo_requests:
self._execute(http, redo_order, redo_requests)
# Now invoke the callbacks. A response with a non-2xx status is converted
# into an HttpError, which is passed to the callback as its exception
# argument rather than raised here.
for request_id in self._order:
resp, content = self._responses[request_id]
request = self._requests[request_id]
callback = self._callbacks[request_id]
response = None
exception = None
try:
if resp.status >= 300:
raise HttpError(resp, content, uri=request.uri)
response = request.postproc(resp, content)
except HttpError, e:
exception = e
if callback is not None:
callback(request_id, response, exception)
if self._callback is not None:
self._callback(request_id, response, exception)
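# A minimal usage sketch for BatchHttpRequest (the `service` object, the
# `http` object and the ids below are illustrative assumptions, not part of
# this module):
#
#   def print_result(request_id, response, exception):
#       if exception is not None:
#           print 'request %s failed: %s' % (request_id, exception)
#       else:
#           print request_id, response
#
#   batch = BatchHttpRequest(callback=print_result)
#   batch.add(service.activities().get(activityId='1'), request_id='first')
#   batch.add(service.activities().get(activityId='2'), request_id='second')
#   batch.execute(http=http)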
class HttpRequestMock(object):
"""Mock of HttpRequest.
Do not construct directly, instead use RequestMockBuilder.
"""
def __init__(self, resp, content, postproc):
"""Constructor for HttpRequestMock
Args:
resp: httplib2.Response, the response to emulate coming from the request
content: string, the response body
postproc: callable, the post processing function usually supplied by
the model class. See model.JsonModel.response() as an example.
"""
self.resp = resp
self.content = content
self.postproc = postproc
if resp is None:
self.resp = httplib2.Response({'status': 200, 'reason': 'OK'})
if 'reason' in self.resp:
self.resp.reason = self.resp['reason']
def execute(self, http=None):
"""Execute the request.
Same behavior as HttpRequest.execute(), but the response is
mocked and not really from an HTTP request/response.
"""
return self.postproc(self.resp, self.content)
class RequestMockBuilder(object):
"""A simple mock of HttpRequest
Pass in a dictionary to the constructor that maps request methodIds to
tuples of (httplib2.Response, content, opt_expected_body) that should be
returned when that method is called. None may also be passed in for the
httplib2.Response, in which case a 200 OK response will be generated.
If an opt_expected_body (str or dict) is provided, it will be compared to
the body and UnexpectedBodyError will be raised on inequality.
Example:
response = '{"data": {"id": "tag:google.c...'
requestBuilder = RequestMockBuilder(
{
'plus.activities.get': (None, response),
}
)
apiclient.discovery.build("plus", "v1", requestBuilder=requestBuilder)
Methods that you do not supply a response for will return a
200 OK with an empty string as the response content, or raise an exception
if check_unexpected is set to True. The methodId is taken from the rpcName
in the discovery document.
For more details see the project wiki.
"""
def __init__(self, responses, check_unexpected=False):
"""Constructor for RequestMockBuilder
The constructed object should be a callable object
that can replace the class HttpRequest.
responses - A dictionary that maps methodIds into tuples
of (httplib2.Response, content). The methodId
comes from the 'rpcName' field in the discovery
document.
check_unexpected - A boolean setting whether or not UnexpectedMethodError
should be raised on unsupplied method.
"""
self.responses = responses
self.check_unexpected = check_unexpected
def __call__(self, http, postproc, uri, method='GET', body=None,
headers=None, methodId=None, resumable=None):
"""Implements the callable interface that discovery.build() expects
of requestBuilder, which is to build an object compatible with
HttpRequest.execute(). See that method for the description of the
parameters and the expected response.
"""
if methodId in self.responses:
response = self.responses[methodId]
resp, content = response[:2]
if len(response) > 2:
# Test the body against the supplied expected_body.
expected_body = response[2]
if bool(expected_body) != bool(body):
# Not expecting a body and provided one
# or expecting a body and not provided one.
raise UnexpectedBodyError(expected_body, body)
if isinstance(expected_body, str):
expected_body = simplejson.loads(expected_body)
body = simplejson.loads(body)
if body != expected_body:
raise UnexpectedBodyError(expected_body, body)
return HttpRequestMock(resp, content, postproc)
elif self.check_unexpected:
raise UnexpectedMethodError(methodId=methodId)
else:
model = JsonModel(False)
return HttpRequestMock(None, '{}', model.response)
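# A hedged sketch of supplying an expected body (the methodId and payloads
# are illustrative):
#
#   requestBuilder = RequestMockBuilder({
#       'plus.activities.insert': (None, '{}', '{"object": {"content": "hi"}}'),
#   }, check_unexpected=True)
#
# A call to that method with a body that is not JSON-equal to the third tuple
# element raises UnexpectedBodyError; any methodId not listed raises
# UnexpectedMethodError because check_unexpected is True.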
class HttpMock(object):
"""Mock of httplib2.Http"""
def __init__(self, filename=None, headers=None):
"""
Args:
filename: string, absolute filename to read response from
headers: dict, headers to return with the response
"""
if headers is None:
headers = {'status': '200 OK'}
if filename:
with open(filename, 'r') as f:
self.data = f.read()
else:
self.data = None
self.response_headers = headers
self.headers = None
self.uri = None
self.method = None
self.body = None
def request(self, uri,
method='GET',
body=None,
headers=None,
redirections=1,
connection_type=None):
self.uri = uri
self.method = method
self.body = body
self.headers = headers
return httplib2.Response(self.response_headers), self.data
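# A typical test-style sketch: serve a canned file as the response, then
# inspect what was requested ('plus.json' is an assumed fixture file):
#
#   http = HttpMock('plus.json', {'status': '200'})
#   resp, content = http.request('https://example.com/endpoint', method='GET')
#   assert http.uri == 'https://example.com/endpoint'
#   assert http.method == 'GET'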
class HttpMockSequence(object):
"""Mock of httplib2.Http
Mocks a sequence of calls to request returning different responses for each
call. Create an instance initialized with the desired response headers
and content and then use as if an httplib2.Http instance.
http = HttpMockSequence([
({'status': '401'}, ''),
({'status': '200'}, '{"access_token":"1/3w","expires_in":3600}'),
({'status': '200'}, 'echo_request_headers'),
])
resp, content = http.request("http://examples.com")
There are special values you can pass in for content to trigger
behaviours that are helpful in testing.
'echo_request_headers' means return the request headers in the response body
'echo_request_headers_as_json' means return the request headers, encoded
as JSON, in the response body
'echo_request_body' means return the request body in the response body
'echo_request_uri' means return the request uri in the response body
"""
def __init__(self, iterable):
"""
Args:
iterable: iterable, a sequence of pairs of (headers, body)
"""
self._iterable = iterable
self.follow_redirects = True
def request(self, uri,
method='GET',
body=None,
headers=None,
redirections=1,
connection_type=None):
resp, content = self._iterable.pop(0)
if content == 'echo_request_headers':
content = headers
elif content == 'echo_request_headers_as_json':
content = simplejson.dumps(headers)
elif content == 'echo_request_body':
if hasattr(body, 'read'):
content = body.read()
else:
content = body
elif content == 'echo_request_uri':
content = uri
return httplib2.Response(resp), content
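# A brief sketch of the 'echo_request_headers' behaviour described above;
# the returned content is the headers dict itself, so a test can assert on it:
#
#   http = HttpMockSequence([({'status': '200'}, 'echo_request_headers')])
#   resp, content = http.request('http://example.com', headers={'x-foo': 'bar'})
#   assert content['x-foo'] == 'bar'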
def set_user_agent(http, user_agent):
"""Set the user-agent on every request.
Args:
http - An instance of httplib2.Http
or something that acts like it.
user_agent: string, the value for the user-agent header.
Returns:
A modified instance of http that was passed in.
Example:
h = httplib2.Http()
h = set_user_agent(h, "my-app-name/6.0")
Most of the time the user-agent will be set while doing auth; this is for
the rare cases where you are accessing an unauthenticated endpoint.
"""
request_orig = http.request
# The closure that will replace 'httplib2.Http.request'.
def new_request(uri, method='GET', body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS,
connection_type=None):
"""Modify the request headers to add the user-agent."""
if headers is None:
headers = {}
if 'user-agent' in headers:
headers['user-agent'] = user_agent + ' ' + headers['user-agent']
else:
headers['user-agent'] = user_agent
resp, content = request_orig(uri, method, body, headers,
redirections, connection_type)
return resp, content
http.request = new_request
return http
def tunnel_patch(http):
"""Tunnel PATCH requests over POST.
Args:
http - An instance of httplib2.Http
or something that acts like it.
Returns:
A modified instance of http that was passed in.
Example:
h = httplib2.Http()
h = tunnel_patch(h)
Useful if you are running on a platform that doesn't support PATCH.
Apply this last if you are using OAuth 1.0, as changing the method
will result in a different signature.
"""
request_orig = http.request
# The closure that will replace 'httplib2.Http.request'.
def new_request(uri, method='GET', body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS,
connection_type=None):
"""Modify the request headers to add the user-agent."""
if headers is None:
headers = {}
if method == 'PATCH':
if 'oauth_token' in headers.get('authorization', ''):
logging.warning(
'OAuth 1.0 request made with Credentials after tunnel_patch.')
headers['x-http-method-override'] = "PATCH"
method = 'POST'
resp, content = request_orig(uri, method, body, headers,
redirections, connection_type)
return resp, content
http.request = new_request
return http
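# The two wrappers above compose, since each simply re-wraps http.request
# (a sketch; the URL is illustrative):
#
#   h = httplib2.Http()
#   h = set_user_agent(h, 'my-app-name/6.0')
#   h = tunnel_patch(h)
#   resp, content = h.request('https://example.com/item', method='PATCH')
#   # Goes over the wire as POST with an x-http-method-override: PATCH header.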
| mpl-2.0 |
nickw444/flask-sqlalchemy | docs/conf.py | 10 | 8400 | # -*- coding: utf-8 -*-
#
# Flask-SQLAlchemy documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 1 14:31:57 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
sys.path.append(os.path.abspath('_themes'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Flask-SQLAlchemy'
copyright = u'2010-2014, Armin Ronacher'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
import pkg_resources
try:
release = pkg_resources.get_distribution('Flask-SQLAlchemy').version
except pkg_resources.DistributionNotFound:
print('To build the documentation, The distribution information of')
print('Flask-SQLAlchemy has to be available. Either install the package')
print('into your development environment or run "setup.py develop"')
print('to set up the metadata. A virtualenv is recommended!')
sys.exit(1)
del pkg_resources
if 'dev' in release:
release = release.split('dev')[0] + 'dev'
version = '.'.join(release.split('.')[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'flask'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'index_logo': 'flask-sqlalchemy.png'
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar. Do not set, template magic!
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': ['sidebarintro.html', 'sourcelink.html', 'searchbox.html'],
'**': ['sidebarlogo.html', 'localtoc.html', 'relations.html',
'sourcelink.html', 'searchbox.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Flask-SQLAlchemydoc'
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Flask-SQLAlchemy.tex', u'Flask-SQLAlchemy Documentation',
u'Armin Ronacher', 'manual'),
]
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
latex_elements = {
'fontpkg': r'\usepackage{mathpazo}',
'papersize': 'a4paper',
'pointsize': '12pt',
'preamble': r'\usepackage{flaskstyle}'
}
latex_use_parts = True
latex_additional_files = ['flaskstyle.sty', 'logo.pdf']
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'flask-sqlalchemy', u'Flask-SQLAlchemy Documentation',
[u'Armin Ronacher'], 1)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None,
'http://flask.pocoo.org/docs/': None,
'http://www.sqlalchemy.org/docs/': None}
pygments_style = 'flask_theme_support.FlaskyStyle'
# fall back if theme is not there
try:
__import__('flask_theme_support')
except ImportError as e:
print('-' * 74)
print('Warning: Flask themes unavailable. Building with default theme')
print('If you want the Flask themes, run this command and build again:')
print()
print(' git submodule update --init')
print('-' * 74)
pygments_style = 'tango'
html_theme = 'default'
html_theme_options = {}
| bsd-3-clause |
eadgarchen/tensorflow | tensorflow/python/keras/applications/vgg16/__init__.py | 75 | 1127 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""VGG16 Keras application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras._impl.keras.applications.vgg16 import decode_predictions
from tensorflow.python.keras._impl.keras.applications.vgg16 import preprocess_input
from tensorflow.python.keras._impl.keras.applications.vgg16 import VGG16
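# A hedged usage sketch of the re-exported names (standard Keras application
# API; loading the image batch is elided and `img_batch` is an assumption):
#
#   model = VGG16(weights='imagenet')        # expects 224x224 RGB inputs
#   x = preprocess_input(img_batch)          # img_batch: (n, 224, 224, 3)
#   preds = model.predict(x)
#   print(decode_predictions(preds, top=3))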
del absolute_import
del division
del print_function
| apache-2.0 |
KohlsTechnology/ansible | lib/ansible/module_utils/ipa.py | 29 | 7657 | # -*- coding: utf-8 -*-
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2016 Thomas Krahn (@Nosmoht)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
try:
import json
except ImportError:
import simplejson as json
import re
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.six import PY3
from ansible.module_utils.six.moves.urllib.parse import quote
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.basic import env_fallback
class IPAClient(object):
def __init__(self, module, host, port, protocol):
self.host = host
self.port = port
self.protocol = protocol
self.module = module
self.headers = None
def get_base_url(self):
return '%s://%s/ipa' % (self.protocol, self.host)
def get_json_url(self):
return '%s/session/json' % self.get_base_url()
def login(self, username, password):
url = '%s/session/login_password' % self.get_base_url()
data = 'user=%s&password=%s' % (quote(username, safe=''), quote(password, safe=''))
headers = {'referer': self.get_base_url(),
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'text/plain'}
try:
resp, info = fetch_url(module=self.module, url=url, data=to_bytes(data), headers=headers)
status_code = info['status']
if status_code not in [200, 201, 204]:
self._fail('login', info['msg'])
self.headers = {'referer': self.get_base_url(),
'Content-Type': 'application/json',
'Accept': 'application/json',
'Cookie': resp.info().get('Set-Cookie')}
except Exception as e:
self._fail('login', to_native(e))
def _fail(self, msg, e):
if 'message' in e:
err_string = e.get('message')
else:
err_string = e
self.module.fail_json(msg='%s: %s' % (msg, err_string))
def get_ipa_version(self):
response = self.ping()['summary']
ipa_ver_regex = re.compile(r'IPA server version (\d\.\d\.\d).*')
version_match = ipa_ver_regex.match(response)
ipa_version = None
if version_match:
ipa_version = version_match.groups()[0]
return ipa_version
def ping(self):
return self._post_json(method='ping', name=None)
def _post_json(self, method, name, item=None):
if item is None:
item = {}
url = '%s/session/json' % self.get_base_url()
data = dict(method=method)
if method != 'ping':
data['params'] = [[name], item]
else:
data['params'] = [[], {}]
try:
resp, info = fetch_url(module=self.module, url=url, data=to_bytes(json.dumps(data)), headers=self.headers)
status_code = info['status']
if status_code not in [200, 201, 204]:
self._fail(method, info['msg'])
except Exception as e:
self._fail('post %s' % method, to_native(e))
if PY3:
charset = resp.headers.get_content_charset('latin-1')
else:
response_charset = resp.headers.getparam('charset')
if response_charset:
charset = response_charset
else:
charset = 'latin-1'
resp = json.loads(to_text(resp.read(), encoding=charset), encoding=charset)
err = resp.get('error')
if err is not None:
self._fail('response %s' % method, err)
if 'result' in resp:
result = resp.get('result')
if 'result' in result:
result = result.get('result')
if isinstance(result, list):
if len(result) > 0:
return result[0]
else:
return {}
return result
return None
def get_diff(self, ipa_data, module_data):
result = []
for key in module_data.keys():
mod_value = module_data.get(key, None)
if isinstance(mod_value, list):
default = []
else:
default = None
ipa_value = ipa_data.get(key, default)
if isinstance(ipa_value, list) and not isinstance(mod_value, list):
mod_value = [mod_value]
if isinstance(ipa_value, list) and isinstance(mod_value, list):
mod_value = sorted(mod_value)
ipa_value = sorted(ipa_value)
if mod_value != ipa_value:
result.append(key)
return result
def modify_if_diff(self, name, ipa_list, module_list, add_method, remove_method, item=None):
changed = False
diff = list(set(ipa_list) - set(module_list))
if len(diff) > 0:
changed = True
if not self.module.check_mode:
if item:
remove_method(name=name, item={item: diff})
else:
remove_method(name=name, item=diff)
diff = list(set(module_list) - set(ipa_list))
if len(diff) > 0:
changed = True
if not self.module.check_mode:
if item:
add_method(name=name, item={item: diff})
else:
add_method(name=name, item=diff)
return changed
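# A minimal subclass sketch (the class name and 'user_find' payload are
# illustrative of how concrete ipa_* modules drive this client):
#
#   class UserIPAClient(IPAClient):
#       def user_find(self, name):
#           return self._post_json(method='user_find', name=None,
#                                  item={'all': True, 'uid': name})
#
#   client = UserIPAClient(module, host='ipa.example.com', port=443,
#                          protocol='https')
#   client.login(username='admin', password='secret')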
def ipa_argument_spec():
return dict(
ipa_prot=dict(type='str', default='https', choices=['http', 'https'], fallback=(env_fallback, ['IPA_PROT'])),
ipa_host=dict(type='str', default='ipa.example.com', fallback=(env_fallback, ['IPA_HOST'])),
ipa_port=dict(type='int', default=443, fallback=(env_fallback, ['IPA_PORT'])),
ipa_user=dict(type='str', default='admin', fallback=(env_fallback, ['IPA_USER'])),
ipa_pass=dict(type='str', required=True, no_log=True, fallback=(env_fallback, ['IPA_PASS'])),
validate_certs=dict(type='bool', default=True),
)
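# Typical use when building a module's argument handling (a sketch; the extra
# 'uid' field is an assumption):
#
#   argument_spec = ipa_argument_spec()
#   argument_spec.update(uid=dict(type='str', required=True))
#   module = AnsibleModule(argument_spec=argument_spec,
#                          supports_check_mode=True)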
| gpl-3.0 |
Midrya/chromium | third_party/logilab/common/modutils.py | 64 | 23589 | # -*- coding: utf-8 -*-
# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Python modules manipulation utility functions.
:type PY_SOURCE_EXTS: tuple(str)
:var PY_SOURCE_EXTS: list of possible python source file extension
:type STD_LIB_DIR: str
:var STD_LIB_DIR: directory where standard modules are located
:type BUILTIN_MODULES: dict
:var BUILTIN_MODULES: dictionary with builtin module names has key
"""
__docformat__ = "restructuredtext en"
import sys
import os
from os.path import splitext, join, abspath, isdir, dirname, exists, basename
from imp import find_module, load_module, C_BUILTIN, PY_COMPILED, PKG_DIRECTORY
from distutils.sysconfig import get_config_var, get_python_lib, get_python_version
from distutils.errors import DistutilsPlatformError
from six.moves import range
try:
import zipimport
except ImportError:
zipimport = None
ZIPFILE = object()
from logilab.common import STD_BLACKLIST, _handle_blacklist
# Notes about STD_LIB_DIR
# Consider arch-specific installation for STD_LIB_DIR definition
# :mod:`distutils.sysconfig` contains too many hardcoded values to rely on
#
# :see: `Problems with /usr/lib64 builds <http://bugs.python.org/issue1294959>`_
# :see: `FHS <http://www.pathname.com/fhs/pub/fhs-2.3.html#LIBLTQUALGTALTERNATEFORMATESSENTIAL>`_
if sys.platform.startswith('win'):
PY_SOURCE_EXTS = ('py', 'pyw')
PY_COMPILED_EXTS = ('dll', 'pyd')
else:
PY_SOURCE_EXTS = ('py',)
PY_COMPILED_EXTS = ('so',)
try:
STD_LIB_DIR = get_python_lib(standard_lib=1)
# get_python_lib(standard_lib=1) is not available on pypy, set STD_LIB_DIR to
# non-valid path, see https://bugs.pypy.org/issue1164
except DistutilsPlatformError:
STD_LIB_DIR = '//'
EXT_LIB_DIR = get_python_lib()
BUILTIN_MODULES = dict(zip(sys.builtin_module_names,
[1]*len(sys.builtin_module_names)))
class NoSourceFile(Exception):
"""exception raised when we are not able to get a python
source file for a precompiled file
"""
class LazyObject(object):
def __init__(self, module, obj):
self.module = module
self.obj = obj
self._imported = None
def _getobj(self):
if self._imported is None:
self._imported = getattr(load_module_from_name(self.module),
self.obj)
return self._imported
def __getattribute__(self, attr):
try:
return super(LazyObject, self).__getattribute__(attr)
except AttributeError as ex:
return getattr(self._getobj(), attr)
def __call__(self, *args, **kwargs):
return self._getobj()(*args, **kwargs)
def load_module_from_name(dotted_name, path=None, use_sys=1):
"""Load a Python module from its name.
:type dotted_name: str
:param dotted_name: python name of a module or package
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type use_sys: bool
:param use_sys:
boolean indicating whether the sys.modules dictionary should be
used or not
:raise ImportError: if the module or package is not found
:rtype: module
:return: the loaded module
"""
return load_module_from_modpath(dotted_name.split('.'), path, use_sys)
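# For example (doctest-style sketch):
#
#   >>> os_path = load_module_from_name('os.path')
#   >>> os_path is __import__('os').path
#   True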
def load_module_from_modpath(parts, path=None, use_sys=1):
"""Load a python module from its splitted name.
:type parts: list(str) or tuple(str)
:param parts:
Python name of a module or package split on '.'
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type use_sys: bool
:param use_sys:
boolean indicating whether the sys.modules dictionary should be used or not
:raise ImportError: if the module or package is not found
:rtype: module
:return: the loaded module
"""
if use_sys:
try:
return sys.modules['.'.join(parts)]
except KeyError:
pass
modpath = []
prevmodule = None
for part in parts:
modpath.append(part)
curname = '.'.join(modpath)
module = None
if len(modpath) != len(parts):
# even with use_sys=False, should try to get outer packages from sys.modules
module = sys.modules.get(curname)
elif use_sys:
# because it may have been indirectly loaded through a parent
module = sys.modules.get(curname)
if module is None:
mp_file, mp_filename, mp_desc = find_module(part, path)
module = load_module(curname, mp_file, mp_filename, mp_desc)
if prevmodule:
setattr(prevmodule, part, module)
_file = getattr(module, '__file__', '')
if not _file and len(modpath) != len(parts):
raise ImportError('no module in %s' % '.'.join(parts[len(modpath):]) )
path = [dirname( _file )]
prevmodule = module
return module
def load_module_from_file(filepath, path=None, use_sys=1, extrapath=None):
"""Load a Python module from it's path.
:type filepath: str
:param filepath: path to the python module or package
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type use_sys: bool
:param use_sys:
boolean indicating whether the sys.modules dictionary should be
used or not
:raise ImportError: if the module or package is not found
:rtype: module
:return: the loaded module
"""
modpath = modpath_from_file(filepath, extrapath)
return load_module_from_modpath(modpath, path, use_sys)
def _check_init(path, mod_path):
"""check there are some __init__.py all along the way"""
for part in mod_path:
path = join(path, part)
if not _has_init(path):
return False
return True
def modpath_from_file(filename, extrapath=None):
"""given a file path return the corresponding splitted module's name
(i.e name of a module or package splitted on '.')
:type filename: str
:param filename: file's path for which we want the module's name
:type extrapath: dict
:param extrapath:
optional extra search path, with path as key and package name for the path
as value. This is usually useful to handle a package split across multiple
directories using the __path__ trick.
:raise ImportError:
if the corresponding module's name has not been found
:rtype: list(str)
:return: the corresponding split module name
"""
base = splitext(abspath(filename))[0]
if extrapath is not None:
for path_ in extrapath:
path = abspath(path_)
if path and base[:len(path)] == path:
submodpath = [pkg for pkg in base[len(path):].split(os.sep)
if pkg]
if _check_init(path, submodpath[:-1]):
return extrapath[path_].split('.') + submodpath
for path in sys.path:
path = abspath(path)
if path and base.startswith(path):
modpath = [pkg for pkg in base[len(path):].split(os.sep) if pkg]
if _check_init(path, modpath[:-1]):
return modpath
raise ImportError('Unable to find module for %s in %s' % (
filename, ', \n'.join(sys.path)))
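# A sketch of the expected mapping (the site-packages path is illustrative):
#
#   modpath_from_file('<site-packages>/logilab/common/modutils.py')
#   -> ['logilab', 'common', 'modutils']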
def file_from_modpath(modpath, path=None, context_file=None):
"""given a mod path (i.e. splitted module / package name), return the
corresponding file, giving priority to source file over precompiled
file if it exists
:type modpath: list or tuple
:param modpath:
split module name (i.e. name of a module or package split
on '.')
(this means explicit relative imports that start with dots have
empty strings in this list!)
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type context_file: str or None
:param context_file:
context file to consider, necessary if the identifier has been
introduced using a relative import unresolvable in the actual
context (i.e. modutils)
:raise ImportError: if there is no such module in the directory
:rtype: str or None
:return:
the path to the module's file or None if it's an integrated
builtin module such as 'sys'
"""
if context_file is not None:
context = dirname(context_file)
else:
context = context_file
if modpath[0] == 'xml':
# handle _xmlplus
try:
return _file_from_modpath(['_xmlplus'] + modpath[1:], path, context)
except ImportError:
return _file_from_modpath(modpath, path, context)
elif modpath == ['os', 'path']:
# FIXME: currently ignoring search_path...
return os.path.__file__
return _file_from_modpath(modpath, path, context)
def get_module_part(dotted_name, context_file=None):
"""given a dotted name return the module part of the name :
>>> get_module_part('logilab.common.modutils.get_module_part')
'logilab.common.modutils'
:type dotted_name: str
:param dotted_name: full name of the identifier we are interested in
:type context_file: str or None
:param context_file:
context file to consider, necessary if the identifier has been
introduced using a relative import unresolvable in the actual
context (i.e. modutils)
:raise ImportError: if there is no such module in the directory
:rtype: str or None
:return:
the module part of the name or None if we have not been able at
all to import the given name
XXX: deprecated, since it doesn't handle package precedence over module
(see #10066)
"""
# os.path trick
if dotted_name.startswith('os.path'):
return 'os.path'
parts = dotted_name.split('.')
if context_file is not None:
# first check for builtin module which won't be considered latter
# in that case (path != None)
if parts[0] in BUILTIN_MODULES:
if len(parts) > 2:
raise ImportError(dotted_name)
return parts[0]
# don't use += or insert, we want a new list to be created !
path = None
starti = 0
if parts[0] == '':
assert context_file is not None, \
'explicit relative import, but no context_file?'
path = [] # prevent resolving the import non-relatively
starti = 1
while parts[starti] == '': # for all further dots: change context
starti += 1
context_file = dirname(context_file)
for i in range(starti, len(parts)):
try:
file_from_modpath(parts[starti:i+1],
path=path, context_file=context_file)
except ImportError:
if not i >= max(1, len(parts) - 2):
raise
return '.'.join(parts[:i])
return dotted_name
def get_modules(package, src_directory, blacklist=STD_BLACKLIST):
"""given a package directory return a list of all available python
modules in the package and its subpackages
:type package: str
:param package: the python name for the package
:type src_directory: str
:param src_directory:
path of the directory corresponding to the package
:type blacklist: list or tuple
:param blacklist:
optional list of files or directory to ignore, default to
the value of `logilab.common.STD_BLACKLIST`
:rtype: list
:return:
the list of all available python modules in the package and its
subpackages
"""
modules = []
for directory, dirnames, filenames in os.walk(src_directory):
_handle_blacklist(blacklist, dirnames, filenames)
# check for __init__.py
if not '__init__.py' in filenames:
dirnames[:] = ()
continue
if directory != src_directory:
dir_package = directory[len(src_directory):].replace(os.sep, '.')
modules.append(package + dir_package)
for filename in filenames:
if _is_python_file(filename) and filename != '__init__.py':
src = join(directory, filename)
module = package + src[len(src_directory):-3]
modules.append(module.replace(os.sep, '.'))
return modules
def get_module_files(src_directory, blacklist=STD_BLACKLIST):
"""given a package directory return a list of all available python
module's files in the package and its subpackages
:type src_directory: str
:param src_directory:
path of the directory corresponding to the package
:type blacklist: list or tuple
:param blacklist:
optional list of files or directory to ignore, default to the value of
`logilab.common.STD_BLACKLIST`
:rtype: list
:return:
the list of all available python module's files in the package and
its subpackages
"""
files = []
for directory, dirnames, filenames in os.walk(src_directory):
_handle_blacklist(blacklist, dirnames, filenames)
# check for __init__.py
if not '__init__.py' in filenames:
dirnames[:] = ()
continue
for filename in filenames:
if _is_python_file(filename):
src = join(directory, filename)
files.append(src)
return files
def get_source_file(filename, include_no_ext=False):
"""given a python module's file name return the matching source file
name (the filename will be returned identically if it's a already an
absolute path to a python source file...)
:type filename: str
:param filename: python module's file name
:raise NoSourceFile: if no source file exists on the file system
:rtype: str
:return: the absolute path of the source file if it exists
"""
base, orig_ext = splitext(abspath(filename))
for ext in PY_SOURCE_EXTS:
source_path = '%s.%s' % (base, ext)
if exists(source_path):
return source_path
if include_no_ext and not orig_ext and exists(base):
return base
raise NoSourceFile(filename)
def cleanup_sys_modules(directories):
"""remove submodules of `directories` from `sys.modules`"""
cleaned = []
for modname, module in list(sys.modules.items()):
modfile = getattr(module, '__file__', None)
if modfile:
for directory in directories:
if modfile.startswith(directory):
cleaned.append(modname)
del sys.modules[modname]
break
return cleaned
def is_python_source(filename):
"""
rtype: bool
return: True if the filename is a python source file
"""
return splitext(filename)[1][1:] in PY_SOURCE_EXTS
def is_standard_module(modname, std_path=(STD_LIB_DIR,)):
"""try to guess if a module is a standard python module (by default,
see `std_path` parameter's description)
:type modname: str
:param modname: name of the module we are interested in
:type std_path: list(str) or tuple(str)
:param std_path: list of path considered has standard
:rtype: bool
:return:
true if the module:
- is located on the path listed in one of the directory in `std_path`
- is a built-in module
"""
modname = modname.split('.')[0]
try:
filename = file_from_modpath([modname])
except ImportError as ex:
# import failed, I'm probably not so wrong by supposing it's
# not standard...
return False
# modules which are not living in a file are considered standard
# (sys and __builtin__ for instance)
if filename is None:
return True
filename = abspath(filename)
if filename.startswith(EXT_LIB_DIR):
return False
for path in std_path:
if filename.startswith(abspath(path)):
return True
return False
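# For instance (a sketch; results depend on the local installation):
#
#   is_standard_module('sys')              # True: built-in module
#   is_standard_module('os.path')          # True: lives under STD_LIB_DIR
#   is_standard_module('logilab.common')   # False: installed in EXT_LIB_DIR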
def is_relative(modname, from_file):
"""return true if the given module name is relative to the given
file name
:type modname: str
:param modname: name of the module we are interested in
:type from_file: str
:param from_file:
path of the module from which modname has been imported
:rtype: bool
:return:
true if the module has been imported relatively to `from_file`
"""
if not isdir(from_file):
from_file = dirname(from_file)
if from_file in sys.path:
return False
try:
find_module(modname.split('.')[0], [from_file])
return True
except ImportError:
return False
# internal only functions #####################################################
def _file_from_modpath(modpath, path=None, context=None):
"""given a mod path (i.e. splitted module / package name), return the
corresponding file
this function is used internally, see `file_from_modpath`'s
documentation for more information
"""
assert len(modpath) > 0
if context is not None:
try:
mtype, mp_filename = _module_file(modpath, [context])
except ImportError:
mtype, mp_filename = _module_file(modpath, path)
else:
mtype, mp_filename = _module_file(modpath, path)
if mtype == PY_COMPILED:
try:
return get_source_file(mp_filename)
except NoSourceFile:
return mp_filename
elif mtype == C_BUILTIN:
# integrated builtin module
return None
elif mtype == PKG_DIRECTORY:
mp_filename = _has_init(mp_filename)
return mp_filename
def _search_zip(modpath, pic):
for filepath, importer in pic.items():
if importer is not None:
if importer.find_module(modpath[0]):
if not importer.find_module('/'.join(modpath)):
raise ImportError('No module named %s in %s/%s' % (
'.'.join(modpath[1:]), filepath, modpath))
return ZIPFILE, abspath(filepath) + '/' + '/'.join(modpath), filepath
raise ImportError('No module named %s' % '.'.join(modpath))
try:
import pkg_resources
except ImportError:
pkg_resources = None
def _module_file(modpath, path=None):
"""get a module type / file path
:type modpath: list or tuple
:param modpath:
split module name (i.e. name of a module or package split
on '.'), with leading empty strings for explicit relative import
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:rtype: tuple(int, str)
:return: the module type flag and the file path for a module
"""
# egg support compat
try:
pic = sys.path_importer_cache
_path = path if path is not None else sys.path
for __path in _path:
if not __path in pic:
try:
pic[__path] = zipimport.zipimporter(__path)
except zipimport.ZipImportError:
pic[__path] = None
checkeggs = True
except AttributeError:
checkeggs = False
# pkg_resources support (aka setuptools namespace packages)
if (pkg_resources is not None
and modpath[0] in pkg_resources._namespace_packages
and modpath[0] in sys.modules
and len(modpath) > 1):
# setuptools has added into sys.modules a module object with proper
# __path__, get back information from there
module = sys.modules[modpath.pop(0)]
path = module.__path__
imported = []
while modpath:
modname = modpath[0]
# take care: find_module's handling of builtin modules differs across Python versions
#
# Python 2.6.6 (r266:84292, Sep 11 2012, 08:34:23)
# >>> imp.find_module('posix')
# (None, 'posix', ('', '', 6))
#
# Python 3.3.1 (default, Apr 26 2013, 12:08:46)
# >>> imp.find_module('posix')
# (None, None, ('', '', 6))
try:
_, mp_filename, mp_desc = find_module(modname, path)
except ImportError:
if checkeggs:
return _search_zip(modpath, pic)[:2]
raise
else:
if checkeggs and mp_filename:
fullabspath = [abspath(x) for x in _path]
try:
pathindex = fullabspath.index(dirname(abspath(mp_filename)))
emtype, emp_filename, zippath = _search_zip(modpath, pic)
if pathindex > _path.index(zippath):
# an egg takes priority
return emtype, emp_filename
except ValueError:
# XXX not in _path
pass
except ImportError:
pass
checkeggs = False
imported.append(modpath.pop(0))
mtype = mp_desc[2]
if modpath:
if mtype != PKG_DIRECTORY:
raise ImportError('No module %s in %s' % ('.'.join(modpath),
'.'.join(imported)))
# XXX guess if package is using pkgutil.extend_path by looking for
# those keywords in the first four Kbytes
try:
with open(join(mp_filename, '__init__.py')) as stream:
data = stream.read(4096)
except IOError:
path = [mp_filename]
else:
if 'pkgutil' in data and 'extend_path' in data:
# extend_path is called, search sys.path for module/packages
# of this name see pkgutil.extend_path documentation
path = [join(p, *imported) for p in sys.path
if isdir(join(p, *imported))]
else:
path = [mp_filename]
return mtype, mp_filename
def _is_python_file(filename):
"""return true if the given filename should be considered as a python file
.pyc and .pyo are ignored
"""
for ext in ('.py', '.so', '.pyd', '.pyw'):
if filename.endswith(ext):
return True
return False
def _has_init(directory):
"""if the given directory has a valid __init__ file, return its path,
else return None
"""
mod_or_pack = join(directory, '__init__')
for ext in PY_SOURCE_EXTS + ('pyc', 'pyo'):
if exists(mod_or_pack + '.' + ext):
return mod_or_pack + '.' + ext
return None
| bsd-3-clause |
TimesysGit/advantech-linux | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
mdanielwork/intellij-community | python/helpers/coveragepy/coverage/html.py | 40 | 14724 | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""HTML reporting for coverage.py."""
import datetime
import json
import os
import shutil
import coverage
from coverage import env
from coverage.backward import iitems
from coverage.files import flat_rootname
from coverage.misc import CoverageException, Hasher, isolate_module
from coverage.report import Reporter
from coverage.results import Numbers
from coverage.templite import Templite
os = isolate_module(os)
# Static files are looked for in a list of places.
STATIC_PATH = [
# The place Debian puts system Javascript libraries.
"/usr/share/javascript",
# Our htmlfiles directory.
os.path.join(os.path.dirname(__file__), "htmlfiles"),
]
def data_filename(fname, pkgdir=""):
"""Return the path to a data file of ours.
The file is searched for on `STATIC_PATH`, and the first place it's found
is returned.
Each directory in `STATIC_PATH` is searched as-is, and also, if `pkgdir`
is provided, at that sub-directory.
"""
tried = []
for static_dir in STATIC_PATH:
static_filename = os.path.join(static_dir, fname)
if os.path.exists(static_filename):
return static_filename
else:
tried.append(static_filename)
if pkgdir:
static_filename = os.path.join(static_dir, pkgdir, fname)
if os.path.exists(static_filename):
return static_filename
else:
tried.append(static_filename)
raise CoverageException(
"Couldn't find static file %r from %r, tried: %r" % (fname, os.getcwd(), tried)
)
def read_data(fname):
"""Return the contents of a data file of ours."""
with open(data_filename(fname)) as data_file:
return data_file.read()
def write_html(fname, html):
"""Write `html` to `fname`, properly encoded."""
with open(fname, "wb") as fout:
fout.write(html.encode('ascii', 'xmlcharrefreplace'))
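# write_html keeps the files pure ASCII: any non-ASCII character in the
# rendered page is written as an XML character reference, e.g. in Python 2:
#
#   u'\u219b'.encode('ascii', 'xmlcharrefreplace') == '&#8603;'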
class HtmlReporter(Reporter):
"""HTML reporting."""
# These files will be copied from the htmlfiles directory to the output
# directory.
STATIC_FILES = [
("style.css", ""),
("jquery.min.js", "jquery"),
("jquery.debounce.min.js", "jquery-debounce"),
("jquery.hotkeys.js", "jquery-hotkeys"),
("jquery.isonscreen.js", "jquery-isonscreen"),
("jquery.tablesorter.min.js", "jquery-tablesorter"),
("coverage_html.js", ""),
("keybd_closed.png", ""),
("keybd_open.png", ""),
]
def __init__(self, cov, config):
super(HtmlReporter, self).__init__(cov, config)
self.directory = None
title = self.config.html_title
if env.PY2:
title = title.decode("utf8")
self.template_globals = {
'escape': escape,
'pair': pair,
'title': title,
'__url__': coverage.__url__,
'__version__': coverage.__version__,
}
self.source_tmpl = Templite(read_data("pyfile.html"), self.template_globals)
self.coverage = cov
self.files = []
self.has_arcs = self.coverage.data.has_arcs()
self.status = HtmlStatus()
self.extra_css = None
self.totals = Numbers()
self.time_stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
def report(self, morfs):
"""Generate an HTML report for `morfs`.
`morfs` is a list of modules or file names.
"""
assert self.config.html_dir, "must give a directory for html reporting"
# Read the status data.
self.status.read(self.config.html_dir)
# Check that this run used the same settings as the last run.
m = Hasher()
m.update(self.config)
these_settings = m.hexdigest()
if self.status.settings_hash() != these_settings:
self.status.reset()
self.status.set_settings_hash(these_settings)
# The user may have extra CSS they want copied.
if self.config.extra_css:
self.extra_css = os.path.basename(self.config.extra_css)
# Process all the files.
self.report_files(self.html_file, morfs, self.config.html_dir)
if not self.files:
raise CoverageException("No data to report.")
# Write the index file.
self.index_file()
self.make_local_static_report_files()
return self.totals.n_statements and self.totals.pc_covered
def make_local_static_report_files(self):
"""Make local instances of static files for HTML report."""
# The files we provide must always be copied.
for static, pkgdir in self.STATIC_FILES:
shutil.copyfile(
data_filename(static, pkgdir),
os.path.join(self.directory, static)
)
# The user may have extra CSS they want copied.
if self.extra_css:
shutil.copyfile(
self.config.extra_css,
os.path.join(self.directory, self.extra_css)
)
def file_hash(self, source, fr):
"""Compute a hash that changes if the file needs to be re-reported."""
m = Hasher()
m.update(source)
self.coverage.data.add_to_hash(fr.filename, m)
return m.hexdigest()
def html_file(self, fr, analysis):
"""Generate an HTML file for one source file."""
source = fr.source()
# Find out if the file on disk is already correct.
rootname = flat_rootname(fr.relative_filename())
this_hash = self.file_hash(source.encode('utf-8'), fr)
that_hash = self.status.file_hash(rootname)
if this_hash == that_hash:
# Nothing has changed to require the file to be reported again.
self.files.append(self.status.index_info(rootname))
return
self.status.set_file_hash(rootname, this_hash)
# Get the numbers for this file.
nums = analysis.numbers
if self.has_arcs:
missing_branch_arcs = analysis.missing_branch_arcs()
arcs_executed = analysis.arcs_executed()
# These classes determine which lines are highlighted by default.
c_run = "run hide_run"
c_exc = "exc"
c_mis = "mis"
c_par = "par " + c_run
lines = []
for lineno, line in enumerate(fr.source_token_lines(), start=1):
# Figure out how to mark this line.
line_class = []
annotate_html = ""
annotate_long = ""
if lineno in analysis.statements:
line_class.append("stm")
if lineno in analysis.excluded:
line_class.append(c_exc)
elif lineno in analysis.missing:
line_class.append(c_mis)
elif self.has_arcs and lineno in missing_branch_arcs:
line_class.append(c_par)
shorts = []
longs = []
for b in missing_branch_arcs[lineno]:
if b < 0:
shorts.append("exit")
else:
shorts.append(b)
longs.append(fr.missing_arc_description(lineno, b, arcs_executed))
# 202F is NARROW NO-BREAK SPACE.
# 219B is RIGHTWARDS ARROW WITH STROKE.
short_fmt = "%s&#x202F;&#x219B;&#x202F;%s"

annotate_html = ", ".join(short_fmt % (lineno, d) for d in shorts)
if len(longs) == 1:
annotate_long = longs[0]
else:
annotate_long = "%d missed branches: %s" % (
len(longs),
", ".join("%d) %s" % (num, ann_long)
for num, ann_long in enumerate(longs, start=1)),
)
elif lineno in analysis.statements:
line_class.append(c_run)
# Build the HTML for the line.
html = []
for tok_type, tok_text in line:
if tok_type == "ws":
html.append(escape(tok_text))
else:
tok_html = escape(tok_text) or '&nbsp;'
html.append(
'<span class="%s">%s</span>' % (tok_type, tok_html)
)
lines.append({
'html': ''.join(html),
'number': lineno,
'class': ' '.join(line_class) or "pln",
'annotate': annotate_html,
'annotate_long': annotate_long,
})
# Write the HTML page for this file.
html = self.source_tmpl.render({
'c_exc': c_exc,
'c_mis': c_mis,
'c_par': c_par,
'c_run': c_run,
'has_arcs': self.has_arcs,
'extra_css': self.extra_css,
'fr': fr,
'nums': nums,
'lines': lines,
'time_stamp': self.time_stamp,
})
html_filename = rootname + ".html"
html_path = os.path.join(self.directory, html_filename)
write_html(html_path, html)
# Save this file's information for the index file.
index_info = {
'nums': nums,
'html_filename': html_filename,
'relative_filename': fr.relative_filename(),
}
self.files.append(index_info)
self.status.set_index_info(rootname, index_info)
def index_file(self):
"""Write the index.html file for this report."""
index_tmpl = Templite(read_data("index.html"), self.template_globals)
self.totals = sum(f['nums'] for f in self.files)
html = index_tmpl.render({
'has_arcs': self.has_arcs,
'extra_css': self.extra_css,
'files': self.files,
'totals': self.totals,
'time_stamp': self.time_stamp,
})
write_html(os.path.join(self.directory, "index.html"), html)
# Write the latest hashes for next time.
self.status.write(self.directory)
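# Sketch (illustration only): sum() above is seeded with the integer 0, so
# the Numbers class is assumed to support both Numbers + Numbers and
# 0 + Numbers. A minimal stand-in for that protocol:
class _NumbersLikeDemo(object):
    """Hypothetical stand-in, not the real coverage.results.Numbers."""
    def __init__(self, n_statements=0):
        self.n_statements = n_statements
    def __add__(self, other):
        return _NumbersLikeDemo(self.n_statements + other.n_statements)
    def __radd__(self, other):
        # sum() starts from 0; treat it as the empty Numbers.
        return self if other == 0 else self.__add__(other)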
class HtmlStatus(object):
"""The status information we keep to support incremental reporting."""
STATUS_FILE = "status.json"
STATUS_FORMAT = 1
# pylint: disable=wrong-spelling-in-comment,useless-suppression
# The data looks like:
#
# {
# 'format': 1,
# 'settings': '540ee119c15d52a68a53fe6f0897346d',
# 'version': '4.0a1',
# 'files': {
# 'cogapp___init__': {
# 'hash': 'e45581a5b48f879f301c0f30bf77a50c',
# 'index': {
# 'html_filename': 'cogapp___init__.html',
# 'name': 'cogapp/__init__',
# 'nums': <coverage.results.Numbers object at 0x10ab7ed0>,
# }
# },
# ...
# 'cogapp_whiteutils': {
# 'hash': '8504bb427fc488c4176809ded0277d51',
# 'index': {
# 'html_filename': 'cogapp_whiteutils.html',
# 'name': 'cogapp/whiteutils',
# 'nums': <coverage.results.Numbers object at 0x10ab7d90>,
# }
# },
# },
# }
def __init__(self):
self.reset()
def reset(self):
"""Initialize to empty."""
self.settings = ''
self.files = {}
def read(self, directory):
"""Read the last status in `directory`."""
usable = False
try:
status_file = os.path.join(directory, self.STATUS_FILE)
with open(status_file, "r") as fstatus:
status = json.load(fstatus)
except (IOError, ValueError):
usable = False
else:
usable = True
if status['format'] != self.STATUS_FORMAT:
usable = False
elif status['version'] != coverage.__version__:
usable = False
if usable:
self.files = {}
for filename, fileinfo in iitems(status['files']):
fileinfo['index']['nums'] = Numbers(*fileinfo['index']['nums'])
self.files[filename] = fileinfo
self.settings = status['settings']
else:
self.reset()
def write(self, directory):
"""Write the current status to `directory`."""
status_file = os.path.join(directory, self.STATUS_FILE)
files = {}
for filename, fileinfo in iitems(self.files):
fileinfo['index']['nums'] = fileinfo['index']['nums'].init_args()
files[filename] = fileinfo
status = {
'format': self.STATUS_FORMAT,
'version': coverage.__version__,
'settings': self.settings,
'files': files,
}
with open(status_file, "w") as fout:
json.dump(status, fout)
# Older versions of ShiningPanda look for the old name, status.dat.
# Accommodate them if we are running under Jenkins.
# https://issues.jenkins-ci.org/browse/JENKINS-28428
if "JENKINS_URL" in os.environ:
with open(os.path.join(directory, "status.dat"), "w") as dat:
dat.write("https://issues.jenkins-ci.org/browse/JENKINS-28428\n")
def settings_hash(self):
"""Get the hash of the coverage.py settings."""
return self.settings
def set_settings_hash(self, settings):
"""Set the hash of the coverage.py settings."""
self.settings = settings
def file_hash(self, fname):
"""Get the hash of `fname`'s contents."""
return self.files.get(fname, {}).get('hash', '')
def set_file_hash(self, fname, val):
"""Set the hash of `fname`'s contents."""
self.files.setdefault(fname, {})['hash'] = val
def index_info(self, fname):
"""Get the information for index.html for `fname`."""
return self.files.get(fname, {}).get('index', {})
def set_index_info(self, fname, info):
"""Set the information for index.html for `fname`."""
self.files.setdefault(fname, {})['index'] = info
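# Usage sketch (hypothetical names): the status object round-trips between
# runs so unchanged files can be skipped on the next report.
def _demo_status_roundtrip(directory):
    """Illustration only; not part of the public API."""
    status = HtmlStatus()
    status.read(directory)                      # loads status.json if usable
    status.set_file_hash("example_py", "deadbeef")
    status.set_index_info("example_py", {
        'nums': Numbers(),                      # placeholder numbers
        'html_filename': "example_py.html",
        'relative_filename': "example.py",
    })
    status.write(directory)                     # persisted for the next run
    return status.file_hash("example_py")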
# Helpers for templates and generating HTML
def escape(t):
"""HTML-escape the text in `t`.
This is only suitable for HTML text, not attributes.
"""
# Convert HTML special chars into HTML entities.
return t.replace("&", "&amp;").replace("<", "&lt;")
def pair(ratio):
"""Format a pair of numbers so JavaScript can read them in an attribute."""
return "%s %s" % ratio
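# Example (illustration only): pair() renders the two-number ratios embedded
# in data attributes that the report's JavaScript parses and sorts on.
def _demo_pair():
    """Example only: returns "3 4"."""
    return pair((3, 4))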
| apache-2.0 |
huyang1532/asciidoc | filters/graphviz/graphviz2png.py | 29 | 5484 | #!/usr/bin/env python
import os, sys, subprocess
from optparse import *
__AUTHOR__ = "Gouichi Iisaka <iisaka51@gmail.com>"
__VERSION__ = '1.1.4'
class EApp(Exception):
'''Application specific exception.'''
pass
class Application():
'''
NAME
graphviz2png - Converts textual graphviz notation to PNG file
SYNOPSIS
graphviz2png [options] INFILE
DESCRIPTION
This filter reads Graphviz notation text from the input file
INFILE (or stdin if INFILE is -), converts it to a PNG image file.
OPTIONS
-o OUTFILE, --outfile=OUTFILE
The file name of the output file. If not specified the output file is
named like INFILE but with a .png file name extension.
-L LAYOUT, --layout=LAYOUT
Graphviz layout: dot, neato, twopi, circo, fdp
Default is 'dot'.
-F FORMAT, --format=FORMAT
Graphviz output format: png, svg, or any other format Graphviz
supports. Run dot -T? to get the full list.
Default is 'png'.
-v, --verbose
Verbosely print processing information to stderr.
-h, --help
Print this documentation.
-V, --version
Print program version number.
SEE ALSO
graphviz(1)
AUTHOR
Written by Gouichi Iisaka, <iisaka51@gmail.com>
Format support added by Elmo Todurov, <todurov@gmail.com>
THANKS
Stuart Rackham, <srackham@gmail.com>
This script was inspired by his music2png.py and AsciiDoc
LICENSE
Copyright (C) 2008-2009 Gouichi Iisaka.
Free use of this software is granted under the terms of
the GNU General Public License (GPL).
'''
def __init__(self, argv=None):
# Run dot, get the list of supported formats. It's prefixed by some junk.
format_output = subprocess.Popen(["dot", "-T?"], stderr=subprocess.PIPE, stdout=subprocess.PIPE).communicate()[1]
# The junk itself contains ': ' separators and the list ends with a newline,
# so we split on ': ', take the last piece, strip the trailing newline, then
# split on spaces to get the format list.
supported_formats = format_output.split(": ")[2][:-1].split(" ")
if not argv:
argv = sys.argv
self.usage = '%prog [options] inputfile'
self.version = 'Version: %s\n' % __VERSION__
self.version += 'Copyright(c) 2008-2009: %s\n' % __AUTHOR__
self.option_list = [
Option("-o", "--outfile", action="store",
dest="outfile",
help="Output file"),
Option("-L", "--layout", action="store",
dest="layout", default="dot", type="choice",
choices=['dot','neato','twopi','circo','fdp'],
help="Layout type. LAYOUT=<dot|neato|twopi|circo|fdp>"),
Option("-F", "--format", action="store",
dest="format", default="png", type="choice",
choices=supported_formats,
help="Format type. FORMAT=<" + "|".join(supported_formats) + ">"),
Option("--debug", action="store_true",
dest="do_debug",
help=SUPPRESS_HELP),
Option("-v", "--verbose", action="store_true",
dest="do_verbose", default=False,
help="verbose output"),
]
self.parser = OptionParser( usage=self.usage, version=self.version,
option_list=self.option_list)
(self.options, self.args) = self.parser.parse_args()
if len(self.args) != 1:
self.parser.print_help()
sys.exit(1)
self.options.infile = self.args[0]
def systemcmd(self, cmd):
if self.options.do_verbose:
msg = 'Execute: %s' % cmd
sys.stderr.write(msg + os.linesep)
else:
cmd += ' 2>%s' % os.devnull
if os.system(cmd):
raise EApp, 'failed command: %s' % cmd
def graphviz2png(self, infile, outfile):
'''Convert Graphviz notation in file infile to
PNG file named outfile.'''
outfile = os.path.abspath(outfile)
outdir = os.path.dirname(outfile)
if not os.path.isdir(outdir):
raise EApp, 'directory does not exist: %s' % outdir
basefile = os.path.splitext(outfile)[0]
saved_cwd = os.getcwd()
os.chdir(outdir)
try:
cmd = '%s -T%s "%s" > "%s"' % (
self.options.layout, self.options.format, infile, outfile)
self.systemcmd(cmd)
finally:
os.chdir(saved_cwd)
if not self.options.do_debug:
os.unlink(infile)
def run(self):
if self.options.format == '':
self.options.format = 'png'
if self.options.infile == '-':
if self.options.outfile is None:
sys.stderr.write('OUTFILE must be specified')
sys.exit(1)
infile = os.path.splitext(self.options.outfile)[0] + '.txt'
lines = sys.stdin.readlines()
open(infile, 'w').writelines(lines)
if not os.path.isfile(infile):
raise EApp, 'input file does not exist: %s' % infile
if self.options.outfile is None:
outfile = os.path.splitext(infile)[0] + '.png'
else:
outfile = self.options.outfile
self.graphviz2png(infile, outfile)
# To suppress asciidoc 'no output from filter' warnings.
if self.options.infile == '-':
sys.stdout.write(' ')
if __name__ == "__main__":
app = Application()
app.run()
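# Usage sketch (assumes the Graphviz 'dot' binary is on PATH):
#   echo 'digraph G { a -> b }' | python graphviz2png.py -o out.png -
# reads DOT source from stdin and renders out.png with the default layout;
# add '-L neato -F svg' to pick another layout engine or output format.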
| gpl-2.0 |
vmindru/ansible-modules-core | cloud/openstack/os_router.py | 43 | 13894 | #!/usr/bin/python
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from distutils.version import StrictVersion
DOCUMENTATION = '''
---
module: os_router
short_description: Create or delete routers from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
author: "David Shrewsbury (@Shrews)"
description:
- Create or Delete routers from OpenStack. Although Neutron allows
routers to share the same name, this module enforces name uniqueness
to be more user friendly.
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
name:
description:
- Name to be give to the router
required: true
admin_state_up:
description:
- Desired admin state of the created or existing router.
required: false
default: true
enable_snat:
description:
- Enable Source NAT (SNAT) attribute.
required: false
default: true
network:
description:
- Unique name or ID of the external gateway network.
- Required if I(interfaces) or I(enable_snat) are provided.
type: string
required: false
default: None
project:
description:
- Unique name or ID of the project.
type: string
required: false
default: None
version_added: "2.2"
external_fixed_ips:
description:
- The IP address parameters for the external gateway network. Each
is a dictionary with the subnet name or ID (subnet) and the IP
address to assign on the subnet (ip). If no IP is specified,
one is automatically assigned from that subnet.
required: false
default: None
interfaces:
description:
- List of subnets to attach to the router internal interface.
required: false
default: None
requirements: ["shade"]
'''
EXAMPLES = '''
# Create a simple router, not attached to a gateway or subnets.
- os_router:
cloud: mycloud
state: present
name: simple_router
# Create a simple router, not attached to a gateway or subnets for a given project.
- os_router:
cloud: mycloud
state: present
name: simple_router
project: myproj
# Creates a router attached to ext_network1 on an IPv4 subnet and one
# internal subnet interface.
- os_router:
cloud: mycloud
state: present
name: router1
network: ext_network1
external_fixed_ips:
- subnet: public-subnet
ip: 172.24.4.2
interfaces:
- private-subnet
# Update existing router1 external gateway to include the IPv6 subnet.
# Note that since 'interfaces' is not provided, any existing internal
# interfaces on an existing router will be left intact.
- os_router:
cloud: mycloud
state: present
name: router1
network: ext_network1
external_fixed_ips:
- subnet: public-subnet
ip: 172.24.4.2
- subnet: ipv6-public-subnet
ip: 2001:db8::3
# Delete router1
- os_router:
cloud: mycloud
state: absent
name: router1
'''
RETURN = '''
router:
description: Dictionary describing the router.
returned: On success when I(state) is 'present'
type: dictionary
contains:
id:
description: Router ID.
type: string
sample: "474acfe5-be34-494c-b339-50f06aa143e4"
name:
description: Router name.
type: string
sample: "router1"
admin_state_up:
description: Administrative state of the router.
type: boolean
sample: true
status:
description: The router status.
type: string
sample: "ACTIVE"
tenant_id:
description: The tenant ID.
type: string
sample: "861174b82b43463c9edc5202aadc60ef"
external_gateway_info:
description: The external gateway parameters.
type: dictionary
sample: {
"enable_snat": true,
"external_fixed_ips": [
{
"ip_address": "10.6.6.99",
"subnet_id": "4272cb52-a456-4c20-8f3c-c26024ecfa81"
}
]
}
routes:
description: The extra routes configuration for L3 router.
type: list
'''
def _needs_update(cloud, module, router, network, internal_subnet_ids):
"""Decide if the given router needs an update.
"""
if router['admin_state_up'] != module.params['admin_state_up']:
return True
if router['external_gateway_info']:
if router['external_gateway_info'].get('enable_snat', True) != module.params['enable_snat']:
return True
if network:
if not router['external_gateway_info']:
return True
elif router['external_gateway_info']['network_id'] != network['id']:
return True
# check external interfaces
if module.params['external_fixed_ips']:
for new_iface in module.params['external_fixed_ips']:
subnet = cloud.get_subnet(new_iface['subnet'])
exists = False
# compare the requested interface with existing, looking for an existing match
for existing_iface in router['external_gateway_info']['external_fixed_ips']:
if existing_iface['subnet_id'] == subnet['id']:
if 'ip' in new_iface:
if existing_iface['ip_address'] == new_iface['ip']:
# both subnet id and ip address match
exists = True
break
else:
# only the subnet was given, so ip doesn't matter
exists = True
break
# this interface isn't present on the existing router
if not exists:
return True
# check internal interfaces
if module.params['interfaces']:
existing_subnet_ids = []
for port in cloud.list_router_interfaces(router, 'internal'):
if 'fixed_ips' in port:
for fixed_ip in port['fixed_ips']:
existing_subnet_ids.append(fixed_ip['subnet_id'])
if set(internal_subnet_ids) != set(existing_subnet_ids):
return True
return False
def _system_state_change(cloud, module, router, network, internal_ids):
"""Check if the system state would be changed."""
state = module.params['state']
if state == 'absent' and router:
return True
if state == 'present':
if not router:
return True
return _needs_update(cloud, module, router, network, internal_ids)
return False
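# Sketch: in check mode the module only predicts work. A change is reported
# when state=absent finds an existing router, or when state=present finds no
# router or any pending difference flagged by _needs_update().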
def _build_kwargs(cloud, module, router, network):
kwargs = {
'admin_state_up': module.params['admin_state_up'],
}
if router:
kwargs['name_or_id'] = router['id']
else:
kwargs['name'] = module.params['name']
if network:
kwargs['ext_gateway_net_id'] = network['id']
# can't send enable_snat unless we have a network
kwargs['enable_snat'] = module.params['enable_snat']
if module.params['external_fixed_ips']:
kwargs['ext_fixed_ips'] = []
for iface in module.params['external_fixed_ips']:
subnet = cloud.get_subnet(iface['subnet'])
d = {'subnet_id': subnet['id']}
if 'ip' in iface:
d['ip_address'] = iface['ip']
kwargs['ext_fixed_ips'].append(d)
return kwargs
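# Illustration (hypothetical IDs): for a router gaining a gateway on an
# external network with one fixed IP, _build_kwargs() returns roughly:
#   {'name': 'router1', 'admin_state_up': True,
#    'ext_gateway_net_id': '<net-uuid>', 'enable_snat': True,
#    'ext_fixed_ips': [{'subnet_id': '<subnet-uuid>',
#                       'ip_address': '172.24.4.2'}]}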
def _validate_subnets(module, cloud):
external_subnet_ids = []
internal_subnet_ids = []
if module.params['external_fixed_ips']:
for iface in module.params['external_fixed_ips']:
subnet = cloud.get_subnet(iface['subnet'])
if not subnet:
module.fail_json(msg='subnet %s not found' % iface['subnet'])
external_subnet_ids.append(subnet['id'])
if module.params['interfaces']:
for iface in module.params['interfaces']:
subnet = cloud.get_subnet(iface)
if not subnet:
module.fail_json(msg='subnet %s not found' % iface)
internal_subnet_ids.append(subnet['id'])
return external_subnet_ids, internal_subnet_ids
def main():
argument_spec = openstack_full_argument_spec(
state=dict(default='present', choices=['absent', 'present']),
name=dict(required=True),
admin_state_up=dict(type='bool', default=True),
enable_snat=dict(type='bool', default=True),
network=dict(default=None),
interfaces=dict(type='list', default=None),
external_fixed_ips=dict(type='list', default=None),
project=dict(default=None)
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
if (module.params['project'] and
StrictVersion(shade.__version__) <= StrictVersion('1.9.0')):
module.fail_json(msg="To utilize project, the installed version of "
"the shade library MUST be > 1.9.0")
state = module.params['state']
name = module.params['name']
network = module.params['network']
project = module.params['project']
if module.params['external_fixed_ips'] and not network:
module.fail_json(msg='network is required when supplying external_fixed_ips')
try:
cloud = shade.openstack_cloud(**module.params)
if project is not None:
proj = cloud.get_project(project)
if proj is None:
module.fail_json(msg='Project %s could not be found' % project)
project_id = proj['id']
filters = {'tenant_id': project_id}
else:
project_id = None
filters = None
router = cloud.get_router(name, filters=filters)
net = None
if network:
net = cloud.get_network(network)
if not net:
module.fail_json(msg='network %s not found' % network)
# Validate and cache the subnet IDs so we can avoid duplicate checks
# and expensive API calls.
external_ids, internal_ids = _validate_subnets(module, cloud)
if module.check_mode:
module.exit_json(
changed=_system_state_change(cloud, module, router, net, internal_ids)
)
if state == 'present':
changed = False
if not router:
kwargs = _build_kwargs(cloud, module, router, net)
if project_id:
kwargs['project_id'] = project_id
router = cloud.create_router(**kwargs)
for internal_subnet_id in internal_ids:
cloud.add_router_interface(router, subnet_id=internal_subnet_id)
changed = True
else:
if _needs_update(cloud, module, router, net, internal_ids):
kwargs = _build_kwargs(cloud, module, router, net)
updated_router = cloud.update_router(**kwargs)
# Protect against update_router() not actually
# updating the router.
if not updated_router:
changed = False
# On a router update, if any internal interfaces were supplied,
# just detach all existing internal interfaces and attach the new.
elif internal_ids:
router = updated_router
ports = cloud.list_router_interfaces(router, 'internal')
for port in ports:
cloud.remove_router_interface(router, port_id=port['id'])
for internal_subnet_id in internal_ids:
cloud.add_router_interface(router, subnet_id=internal_subnet_id)
changed = True
module.exit_json(changed=changed,
router=router,
id=router['id'])
elif state == 'absent':
if not router:
module.exit_json(changed=False)
else:
# We need to detach all internal interfaces on a router before
# we will be allowed to delete it.
ports = cloud.list_router_interfaces(router, 'internal')
router_id = router['id']
for port in ports:
cloud.remove_router_interface(router, port_id=port['id'])
cloud.delete_router(router_id)
module.exit_json(changed=True)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
mahak/spark | python/pyspark/pandas/ml.py | 9 | 4160 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import List, Tuple, TYPE_CHECKING, cast
import numpy as np
import pandas as pd
import pyspark
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.stat import Correlation
from pyspark.pandas._typing import Label
from pyspark.pandas.utils import column_labels_level
if TYPE_CHECKING:
import pyspark.pandas as ps # noqa: F401 (SPARK-34943)
CORRELATION_OUTPUT_COLUMN = "__correlation_output__"
def corr(psdf: "ps.DataFrame", method: str = "pearson") -> pd.DataFrame:
"""
The correlation matrix of all the numerical columns of this dataframe.
Only accepts scalar numerical values for now.
:param psdf: the pandas-on-Spark dataframe.
:param method: {'pearson', 'spearman'}
* pearson : standard correlation coefficient
* spearman : Spearman rank correlation
:return: :class:`pandas.DataFrame`
>>> ps.DataFrame({'A': [0, 1], 'B': [1, 0], 'C': ['x', 'y']}).corr()
A B
A 1.0 -1.0
B -1.0 1.0
"""
assert method in ("pearson", "spearman")
ndf, column_labels = to_numeric_df(psdf)
corr = Correlation.corr(ndf, CORRELATION_OUTPUT_COLUMN, method)
pcorr = cast(pd.DataFrame, corr.toPandas())
arr = pcorr.iloc[0, 0].toArray()
if column_labels_level(column_labels) > 1:
idx = pd.MultiIndex.from_tuples(column_labels)
else:
idx = pd.Index([label[0] for label in column_labels])
return pd.DataFrame(arr, columns=idx, index=idx)
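# Usage sketch (illustration only): the same machinery supports rank
# correlation, e.g.
#   corr(ps.DataFrame({'A': [1, 2, 3], 'B': [3, 2, 1]}), method='spearman')
# returns -1.0 off-diagonal because the columns are perfectly anti-ranked.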
def to_numeric_df(psdf: "ps.DataFrame") -> Tuple[pyspark.sql.DataFrame, List[Label]]:
"""
Takes a dataframe and turns it into a dataframe containing a single numerical
vector of doubles. This dataframe has a single field called '_1'.
TODO: index is not preserved currently
:param psdf: the pandas-on-Spark dataframe.
:return: a pair of dataframe, list of strings (the name of the columns
that were converted to numerical types)
>>> to_numeric_df(ps.DataFrame({'A': [0, 1], 'B': [1, 0], 'C': ['x', 'y']}))
(DataFrame[__correlation_output__: vector], [('A',), ('B',)])
"""
# TODO, it should be more robust.
accepted_types = {
np.dtype(dt)
for dt in [np.int8, np.int16, np.int32, np.int64, np.float32, np.float64, np.bool_]
}
numeric_column_labels = [
label for label in psdf._internal.column_labels if psdf[label].dtype in accepted_types
]
numeric_df = psdf._internal.spark_frame.select(
*[psdf._internal.spark_column_for(idx) for idx in numeric_column_labels]
)
va = VectorAssembler(inputCols=numeric_df.columns, outputCol=CORRELATION_OUTPUT_COLUMN)
v = va.transform(numeric_df).select(CORRELATION_OUTPUT_COLUMN)
return v, numeric_column_labels
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.ml
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.ml.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = SparkSession.builder.master("local[4]").appName("pyspark.pandas.ml tests").getOrCreate()
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.ml, globs=globs, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
andrenam/Fogger | fogger_lib/Bridge.py | 1 | 8215 | ### BEGIN LICENSE
# Copyright (C) 2012 Owais Lone <hello@owaislone.org>
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
#from gi.repository import Gtk, Unity, Notify, Dbusmenu, TelepathyGLib
from gi.repository import Gtk, Notify, TelepathyGLib
Notify.init('fogger')
TELEPATHY_PRESENCE_MAP = {
TelepathyGLib.ConnectionPresenceType.AVAILABLE: 'available',
TelepathyGLib.ConnectionPresenceType.AWAY: 'away',
TelepathyGLib.ConnectionPresenceType.EXTENDED_AWAY: 'away',
TelepathyGLib.ConnectionPresenceType.BUSY: 'busy',
TelepathyGLib.ConnectionPresenceType.OFFLINE: 'offline',
}
class DesktopBridge:
icon_name = None
desktop_file = None
actions = {}
launcher_actions = {}
def __init__(self, root, desktop_file, icon_name=None):
self.W = root
self.desktop_file = desktop_file
self.icon_name = icon_name
#self.launcher_entry = Unity.LauncherEntry.get_for_desktop_file(self.desktop_file)
#self.quicklist = Dbusmenu.Menuitem.new()
#self.launcher_entry.set_property("quicklist", self.quicklist)
self.launcher_entry = None
self.quicklist = None
self.indicator = None
#self._rename_methods = {
# Dbusmenu.Menuitem: self._rename_dbus_menu_item,
# Gtk.MenuItem: self._rename_gtk_menu_item,
#}
self._rename_methods = { }
self.telepathy_account_manager = TelepathyGLib.AccountManager.new(
TelepathyGLib.DBusDaemon.dup())
self.telepathy_account_manager.connect(
'most-available-presence-changed', self.notify_presence_change)
self.W.connect('notify::is-active', self.notify_window_state)
def _js(self, W, jscode):
if W:
windows = [W]
else:
windows = self.W.popups
for win in windows:
win.webview.execute_script(jscode)
def _dispatch_dom_event(self, W, event, params):
js = 'var e = document.createEvent("Event"); e.initEvent("%s"); var params={};' % event
for k,v in params.items():
js = js + 'params.%s = "%s";' % (k, v,)
js = js + 'e.foggerData = params; document.dispatchEvent(e);'
self._js(W, js)
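# Illustration: _dispatch_dom_event(W, 'foggerPresenceChange',
# {'presence': 'away'}) executes JavaScript roughly equivalent to:
#   var e = document.createEvent("Event");
#   e.initEvent("foggerPresenceChange");
#   e.foggerData = {presence: "away"};
#   document.dispatchEvent(e);
# so pages can listen via document.addEventListener("foggerPresenceChange", fn).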
def _rename_gtk_menu_item(self, item, name):
item.props.label = name
def _rename_dbus_menu_item(self, item, name):
item.property_set(Dbusmenu.MENUITEM_PROP_LABEL, name)
def rename_item(self, W, data):
widget_id = data['id'][0]
item = self.widgets.get(widget_id)
rename = self._rename_methods.get(item.__class__)
if rename:
rename(item, data['name'][0])
def notify_window_state(self, window, active):
self._dispatch_dom_event(self.W, 'foggerWindowStateChange', {
'active': self.W.props.is_active})
def notify_presence_change(self, manager, presence, status, message):
self._dispatch_dom_event(None, 'foggerPresenceChange',
{'presence': TELEPATHY_PRESENCE_MAP.get(presence, 'offline')})
def get_presence(self, W, data):
presence = self.telepathy_account_manager.get_most_available_presence()
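# NOTE: apparent debug leftover below; the fetched presence tuple is never
# dispatched back to the page.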
print W
def notify(self, W, data):
Notify.Notification.new(data.get('summary', [''])[0], data.get('body', [''])[0], self.icon_name).show()
def set_progress(self, W, data):
self.launcher_entry.props.progress = float(data['progress'][0])
self.launcher_entry.props.progress_visible = True
def clear_progress(self, W, data):
self.launcher_entry.props.progress_visible = False
def set_count(self, W, data):
self.launcher_entry.props.count = int(data['count'][0])
self.launcher_entry.props.count_visible = True
def clear_count(self, W, data):
self.launcher_entry.props.count_visible = False
def set_urgent(self, W, data):
self.launcher_entry.props.urgent = True
def clear_urgent(self, W, data):
self.launcher_entry.props.urgent = False
def add_launcher_action(self, W, data):
name = data['name'][0]
if name in self.launcher_actions:
return
item = self.launcher_actions[name] = Dbusmenu.Menuitem.new()
item.property_set(Dbusmenu.MENUITEM_PROP_LABEL, name)
item.property_set_bool(Dbusmenu.MENUITEM_PROP_VISIBLE, True)
item.connect('item-activated', lambda *a, **kw:
self._dispatch_dom_event(None, 'foggerQLCallbackEvent',
{'name': name}))
self.quicklist.child_append(item)
def remove_launcher_action(self, W, data):
name = data['name'][0]
action = self.launcher_actions.get(name)
if action:
self.quicklist.child_delete(action)
del self.launcher_actions[name]
def remove_launcher_actions(self, W, data):
for action in self.launcher_actions.values():
self.quicklist.child_delete(action)
self.launcher_actions = {}
def add_action(self, W, data):
action_path = data['name'][0]
if action_path in self.actions:
return
action_items = []
action_parts = [X for X in action_path.split('/') if X]
parent = self.W.menubar
length = len(action_parts)
for i, action in enumerate(action_parts, 1):
if i == length:
prepend = False
if i == 1:
parent = self.W.menubar.get_children()[0].get_submenu()
prepend = True
if [X for X in parent.get_children() if X.props.label == action]:
return
item = Gtk.MenuItem(action)
item.props.use_underline = True
if prepend:
parent.prepend(item)
else:
parent.append(item)
item.connect('activate', lambda *a, **kw:
self._dispatch_dom_event(W, 'foggerActionCallbackEvent',
{'name': action_path}))
item.show()
action_items.append(item)
else:
children = [X.props.label for X in parent.get_children()]
if action in children:
parent = [X for X in parent.get_children() if X.props.label == action][0].get_submenu()
action_items.append(parent)
else:
menu = Gtk.MenuItem(action)
menu.props.use_underline = True
menu.set_submenu(Gtk.Menu())
parent.append(menu)
parent = menu.get_submenu()
menu.show()
action_items.append(menu)
self.actions[action_path] = action_items
def remove_action(self, W, data):
action_path = data['name'][0]
action_items = self.actions.get(action_path)
if action_items:
for item in reversed(action_items):
if isinstance(item, Gtk.MenuItem):
submenu = item.get_submenu()
else:
submenu = item
if not submenu or not submenu.get_children():
action_items.remove(item)
item.destroy()
del self.actions[action_path]
def remove_actions(self, W, data):
for value in self.actions.values():
root = value[0]
root.destroy()
self.actions = {}
| gpl-3.0 |
rolandmansilla/microblog | flask/lib/python2.7/site-packages/werkzeug/posixemulation.py | 364 | 3519 | # -*- coding: utf-8 -*-
r"""
werkzeug.posixemulation
~~~~~~~~~~~~~~~~~~~~~~~
Provides a POSIX emulation for some features that are relevant to
web applications. The main purpose is to simplify support for
systems such as Windows NT that are not 100% POSIX compatible.
Currently this only implements a :func:`rename` function that
follows POSIX semantics. Eg: if the target file already exists it
will be replaced without asking.
This module was introduced in 0.6.1 and is not a public interface.
It might become one in later versions of Werkzeug.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
import errno
import time
import random
from ._compat import to_unicode
from .filesystem import get_filesystem_encoding
can_rename_open_file = False
if os.name == 'nt': # pragma: no cover
_rename = lambda src, dst: False
_rename_atomic = lambda src, dst: False
try:
import ctypes
_MOVEFILE_REPLACE_EXISTING = 0x1
_MOVEFILE_WRITE_THROUGH = 0x8
_MoveFileEx = ctypes.windll.kernel32.MoveFileExW
def _rename(src, dst):
src = to_unicode(src, get_filesystem_encoding())
dst = to_unicode(dst, get_filesystem_encoding())
if _rename_atomic(src, dst):
return True
retry = 0
rv = False
while not rv and retry < 100:
rv = _MoveFileEx(src, dst, _MOVEFILE_REPLACE_EXISTING |
_MOVEFILE_WRITE_THROUGH)
if not rv:
time.sleep(0.001)
retry += 1
return rv
# new in Vista and Windows Server 2008
_CreateTransaction = ctypes.windll.ktmw32.CreateTransaction
_CommitTransaction = ctypes.windll.ktmw32.CommitTransaction
_MoveFileTransacted = ctypes.windll.kernel32.MoveFileTransactedW
_CloseHandle = ctypes.windll.kernel32.CloseHandle
can_rename_open_file = True
def _rename_atomic(src, dst):
ta = _CreateTransaction(None, 0, 0, 0, 0, 1000, 'Werkzeug rename')
if ta == -1:
return False
try:
retry = 0
rv = False
while not rv and retry < 100:
rv = _MoveFileTransacted(src, dst, None, None,
_MOVEFILE_REPLACE_EXISTING |
_MOVEFILE_WRITE_THROUGH, ta)
if rv:
rv = _CommitTransaction(ta)
break
else:
time.sleep(0.001)
retry += 1
return rv
finally:
_CloseHandle(ta)
except Exception:
pass
def rename(src, dst):
# Try atomic or pseudo-atomic rename
if _rename(src, dst):
return
# Fall back to "move away and replace"
try:
os.rename(src, dst)
except OSError as e:
if e.errno != errno.EEXIST:
raise
old = "%s-%08x" % (dst, random.randint(0, sys.maxsize))
os.rename(dst, old)
os.rename(src, dst)
try:
os.unlink(old)
except Exception:
pass
else:
rename = os.rename
can_rename_open_file = True
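# Usage sketch: rename() gives POSIX overwrite semantics on every platform,
# so callers can atomically publish a finished file:
#   rename('report.html.tmp', 'report.html')  # replaces the target if present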
| bsd-3-clause |
lebabouin/CouchPotatoServer-develop | libs/sqlalchemy/orm/attributes.py | 18 | 51603 | # orm/attributes.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines instrumentation for class attributes and their interaction
with instances.
This module is usually not directly visible to user applications, but
defines a large part of the ORM's interactivity.
"""
import operator
from operator import itemgetter
from sqlalchemy import util, event, exc as sa_exc
from sqlalchemy.orm import interfaces, collections, events, exc as orm_exc
mapperutil = util.importlater("sqlalchemy.orm", "util")
PASSIVE_NO_RESULT = util.symbol('PASSIVE_NO_RESULT')
ATTR_WAS_SET = util.symbol('ATTR_WAS_SET')
ATTR_EMPTY = util.symbol('ATTR_EMPTY')
NO_VALUE = util.symbol('NO_VALUE')
NEVER_SET = util.symbol('NEVER_SET')
PASSIVE_RETURN_NEVER_SET = util.symbol('PASSIVE_RETURN_NEVER_SET',
"""Symbol indicating that loader callables can be
fired off, but if no callable is applicable and no value is
present, the attribute should remain non-initialized.
NEVER_SET is returned in this case.
""")
PASSIVE_NO_INITIALIZE = util.symbol('PASSIVE_NO_INITIALIZE',
"""Symbol indicating that loader callables should
not be fired off, and a non-initialized attribute
should remain that way.
""")
PASSIVE_NO_FETCH = util.symbol('PASSIVE_NO_FETCH',
"""Symbol indicating that loader callables should not emit SQL,
but a value can be fetched from the current session.
Non-initialized attributes should be initialized to an empty value.
""")
PASSIVE_NO_FETCH_RELATED = util.symbol('PASSIVE_NO_FETCH_RELATED',
"""Symbol indicating that loader callables should not emit SQL for
loading a related object, but can refresh the attributes of the local
instance in order to locate a related object in the current session.
Non-initialized attributes should be initialized to an empty value.
The unit of work uses this mode to check if history is present
on many-to-one attributes with minimal SQL emitted.
""")
PASSIVE_ONLY_PERSISTENT = util.symbol('PASSIVE_ONLY_PERSISTENT',
"""Symbol indicating that loader callables should only fire off for
parent objects which are persistent (i.e., have a database
identity).
Load operations for the "previous" value of an attribute make
use of this flag during change events.
""")
PASSIVE_OFF = util.symbol('PASSIVE_OFF',
"""Symbol indicating that loader callables should be executed
normally.
""")
class QueryableAttribute(interfaces.PropComparator):
"""Base class for class-bound attributes. """
def __init__(self, class_, key, impl=None,
comparator=None, parententity=None):
self.class_ = class_
self.key = key
self.impl = impl
self.comparator = comparator
self.parententity = parententity
manager = manager_of_class(class_)
# manager is None in the case of AliasedClass
if manager:
# propagate existing event listeners from
# immediate superclass
for base in manager._bases:
if key in base:
self.dispatch._update(base[key].dispatch)
dispatch = event.dispatcher(events.AttributeEvents)
dispatch.dispatch_cls._active_history = False
@util.memoized_property
def _supports_population(self):
return self.impl.supports_population
def get_history(self, instance, passive=PASSIVE_OFF):
return self.impl.get_history(instance_state(instance),
instance_dict(instance), passive)
def __selectable__(self):
# TODO: conditionally attach this method based on clause_element ?
return self
def __clause_element__(self):
return self.comparator.__clause_element__()
def label(self, name):
return self.__clause_element__().label(name)
def operate(self, op, *other, **kwargs):
return op(self.comparator, *other, **kwargs)
def reverse_operate(self, op, other, **kwargs):
return op(other, self.comparator, **kwargs)
def hasparent(self, state, optimistic=False):
return self.impl.hasparent(state, optimistic=optimistic) is not False
def __getattr__(self, key):
try:
return getattr(self.comparator, key)
except AttributeError:
raise AttributeError(
'Neither %r object nor %r object has an attribute %r' % (
type(self).__name__,
type(self.comparator).__name__,
key)
)
def __str__(self):
return "%s.%s" % (self.class_.__name__, self.key)
@util.memoized_property
def property(self):
return self.comparator.property
class InstrumentedAttribute(QueryableAttribute):
"""Class bound instrumented attribute which adds descriptor methods."""
def __set__(self, instance, value):
self.impl.set(instance_state(instance),
instance_dict(instance), value, None)
def __delete__(self, instance):
self.impl.delete(instance_state(instance), instance_dict(instance))
def __get__(self, instance, owner):
if instance is None:
return self
dict_ = instance_dict(instance)
if self._supports_population and self.key in dict_:
return dict_[self.key]
else:
return self.impl.get(instance_state(instance),dict_)
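# Illustration (hypothetical mapped class): attribute access goes through the
# descriptor protocol above.
#   User.name        # class access -> the InstrumentedAttribute itself
#   some_user.name   # instance access -> dict_ hit, or impl.get(), which may
#                    # fire a loader callable to populate the value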
def create_proxied_attribute(descriptor):
"""Create an QueryableAttribute / user descriptor hybrid.
Returns a new QueryableAttribute type that delegates descriptor
behavior and getattr() to the given descriptor.
"""
# TODO: can move this to descriptor_props if the need for this
# function is removed from ext/hybrid.py
class Proxy(QueryableAttribute):
"""Presents the :class:`.QueryableAttribute` interface as a
proxy on top of a Python descriptor / :class:`.PropComparator`
combination.
"""
def __init__(self, class_, key, descriptor, comparator,
adapter=None, doc=None):
self.class_ = class_
self.key = key
self.descriptor = descriptor
self._comparator = comparator
self.adapter = adapter
self.__doc__ = doc
@property
def property(self):
return self.comparator.property
@util.memoized_property
def comparator(self):
if util.callable(self._comparator):
self._comparator = self._comparator()
if self.adapter:
self._comparator = self._comparator.adapted(self.adapter)
return self._comparator
def adapted(self, adapter):
"""Proxy adapted() for the use case of AliasedClass calling adapted."""
return self.__class__(self.class_, self.key, self.descriptor,
self._comparator,
adapter)
def __get__(self, instance, owner):
if instance is None:
return self
else:
return self.descriptor.__get__(instance, owner)
def __str__(self):
return self.key
def __getattr__(self, attribute):
"""Delegate __getattr__ to the original descriptor and/or
comparator."""
try:
return getattr(descriptor, attribute)
except AttributeError:
try:
return getattr(self.comparator, attribute)
except AttributeError:
raise AttributeError(
'Neither %r object nor %r object has an attribute %r' % (
type(descriptor).__name__,
type(self.comparator).__name__,
attribute)
)
Proxy.__name__ = type(descriptor).__name__ + 'Proxy'
util.monkeypatch_proxied_specials(Proxy, type(descriptor),
name='descriptor',
from_instance=descriptor)
return Proxy
class AttributeImpl(object):
"""internal implementation for instrumented attributes."""
def __init__(self, class_, key,
callable_, dispatch, trackparent=False, extension=None,
compare_function=None, active_history=False,
parent_token=None, expire_missing=True,
**kwargs):
"""Construct an AttributeImpl.
\class_
associated class
key
string name of the attribute
\callable_
optional function which generates a callable based on a parent
instance, which produces the "default" values for a scalar or
collection attribute when it's first accessed, if not present
already.
trackparent
if True, attempt to track if an instance has a parent attached
to it via this attribute.
extension
a single or list of AttributeExtension object(s) which will
receive set/delete/append/remove/etc. events. Deprecated.
The event package is now used.
compare_function
a function that compares two values which are normally
assignable to this attribute.
active_history
indicates that get_history() should always return the "old" value,
even if it means executing a lazy callable upon attribute change.
parent_token
Usually references the MapperProperty, used as a key for
the hasparent() function to identify an "owning" attribute.
Allows multiple AttributeImpls to all match a single
owner attribute.
expire_missing
if False, don't add an "expiry" callable to this attribute
during state.expire_attributes(None), if no value is present
for this key.
"""
self.class_ = class_
self.key = key
self.callable_ = callable_
self.dispatch = dispatch
self.trackparent = trackparent
self.parent_token = parent_token or self
if compare_function is None:
self.is_equal = operator.eq
else:
self.is_equal = compare_function
# TODO: pass in the manager here
# instead of doing a lookup
attr = manager_of_class(class_)[key]
for ext in util.to_list(extension or []):
ext._adapt_listener(attr, ext)
if active_history:
self.dispatch._active_history = True
self.expire_missing = expire_missing
def _get_active_history(self):
"""Backwards compat for impl.active_history"""
return self.dispatch._active_history
def _set_active_history(self, value):
self.dispatch._active_history = value
active_history = property(_get_active_history, _set_active_history)
def hasparent(self, state, optimistic=False):
"""Return the boolean value of a `hasparent` flag attached to
the given state.
The `optimistic` flag determines what the default return value
should be if no `hasparent` flag can be located.
As this function is used to determine if an instance is an
*orphan*, instances that were loaded from storage should be
assumed to not be orphans, until a True/False value for this
flag is set.
An instance attribute that is loaded by a callable function
will also not have a `hasparent` flag.
"""
assert self.trackparent, "This AttributeImpl is not configured to track parents."
return state.parents.get(id(self.parent_token), optimistic) \
is not False
def sethasparent(self, state, parent_state, value):
"""Set a boolean flag on the given item corresponding to
whether or not it is attached to a parent object via the
attribute represented by this ``InstrumentedAttribute``.
"""
assert self.trackparent, "This AttributeImpl is not configured to track parents."
id_ = id(self.parent_token)
if value:
state.parents[id_] = parent_state
else:
if id_ in state.parents:
last_parent = state.parents[id_]
if last_parent is not False and \
last_parent.key != parent_state.key:
if last_parent.obj() is None:
raise orm_exc.StaleDataError(
"Removing state %s from parent "
"state %s along attribute '%s', "
"but the parent record "
"has gone stale, can't be sure this "
"is the most recent parent." %
(mapperutil.state_str(state),
mapperutil.state_str(parent_state),
self.key))
return
state.parents[id_] = False
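# Sketch (hypothetical states): the hasparent flag drives orphan detection.
#   impl.sethasparent(child_state, parent_state, True)   # on append/set
#   impl.hasparent(child_state)                          # -> True
#   impl.sethasparent(child_state, parent_state, False)  # child is now a
#                                                        # potential orphan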
def set_callable(self, state, callable_):
"""Set a callable function for this attribute on the given object.
This callable will be executed when the attribute is next
accessed, and is assumed to construct part of the instances
previously stored state. When its value or values are loaded,
they will be established as part of the instance's *committed
state*. While *trackparent* information will be assembled for
these instances, attribute-level event handlers will not be
fired.
The callable overrides the class level callable set in the
``InstrumentedAttribute`` constructor.
"""
state.callables[self.key] = callable_
def get_history(self, state, dict_, passive=PASSIVE_OFF):
raise NotImplementedError()
def get_all_pending(self, state, dict_):
"""Return a list of tuples of (state, obj)
for all objects in this attribute's current state
+ history.
Only applies to object-based attributes.
This is an inlining of existing functionality
which roughly corresponds to:
get_state_history(
state,
key,
passive=PASSIVE_NO_INITIALIZE).sum()
"""
raise NotImplementedError()
def initialize(self, state, dict_):
"""Initialize the given state's attribute with an empty value."""
dict_[self.key] = None
return None
def get(self, state, dict_, passive=PASSIVE_OFF):
"""Retrieve a value from the given object.
If a callable is assembled on this object's attribute, and
passive is False, the callable will be executed and the
resulting value will be set as the new value for this attribute.
"""
if self.key in dict_:
return dict_[self.key]
else:
# if history present, don't load
key = self.key
if key not in state.committed_state or \
state.committed_state[key] is NEVER_SET:
if passive is PASSIVE_NO_INITIALIZE:
return PASSIVE_NO_RESULT
if key in state.callables:
callable_ = state.callables[key]
value = callable_(passive)
elif self.callable_:
value = self.callable_(state, passive)
else:
value = ATTR_EMPTY
if value is PASSIVE_NO_RESULT or value is NEVER_SET:
return value
elif value is ATTR_WAS_SET:
try:
return dict_[key]
except KeyError:
# TODO: no test coverage here.
raise KeyError(
"Deferred loader for attribute "
"%r failed to populate "
"correctly" % key)
elif value is not ATTR_EMPTY:
return self.set_committed_value(state, dict_, value)
if passive is PASSIVE_RETURN_NEVER_SET:
return NEVER_SET
else:
# Return a new, empty value
return self.initialize(state, dict_)
def append(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
self.set(state, dict_, value, initiator, passive=passive)
def remove(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
self.set(state, dict_, None, initiator,
passive=passive, check_old=value)
def pop(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
self.set(state, dict_, None, initiator,
passive=passive, check_old=value, pop=True)
def set(self, state, dict_, value, initiator,
passive=PASSIVE_OFF, check_old=None, pop=False):
raise NotImplementedError()
def get_committed_value(self, state, dict_, passive=PASSIVE_OFF):
"""return the unchanged value of this attribute"""
if self.key in state.committed_state:
value = state.committed_state[self.key]
if value is NO_VALUE:
return None
else:
return value
else:
return self.get(state, dict_, passive=passive)
def set_committed_value(self, state, dict_, value):
"""set an attribute value on the given instance and 'commit' it."""
dict_[self.key] = value
state.commit(dict_, [self.key])
return value
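# Sketch (illustration only): set_committed_value() establishes a loaded
# value without recording history, as when populating from a database row:
#   attr.impl.set_committed_value(state, state.dict, loaded_value)
# whereas attr.impl.set(state, state.dict, value, None) records the old
# value so the unit of work can flush the change.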
class ScalarAttributeImpl(AttributeImpl):
"""represents a scalar value-holding InstrumentedAttribute."""
accepts_scalar_loader = True
uses_objects = False
supports_population = True
def delete(self, state, dict_):
# TODO: catch key errors, convert to attributeerror?
if self.dispatch._active_history:
old = self.get(state, dict_, PASSIVE_RETURN_NEVER_SET)
else:
old = dict_.get(self.key, NO_VALUE)
if self.dispatch.remove:
self.fire_remove_event(state, dict_, old, None)
state.modified_event(dict_, self, old)
del dict_[self.key]
def get_history(self, state, dict_, passive=PASSIVE_OFF):
return History.from_scalar_attribute(
self, state, dict_.get(self.key, NO_VALUE))
def set(self, state, dict_, value, initiator,
passive=PASSIVE_OFF, check_old=None, pop=False):
if initiator and initiator.parent_token is self.parent_token:
return
if self.dispatch._active_history:
old = self.get(state, dict_, PASSIVE_RETURN_NEVER_SET)
else:
old = dict_.get(self.key, NO_VALUE)
if self.dispatch.set:
value = self.fire_replace_event(state, dict_,
value, old, initiator)
state.modified_event(dict_, self, old)
dict_[self.key] = value
def fire_replace_event(self, state, dict_, value, previous, initiator):
for fn in self.dispatch.set:
value = fn(state, value, previous, initiator or self)
return value
def fire_remove_event(self, state, dict_, value, initiator):
for fn in self.dispatch.remove:
fn(state, value, initiator or self)
@property
def type(self):
return self.property.columns[0].type
class MutableScalarAttributeImpl(ScalarAttributeImpl):
"""represents a scalar value-holding InstrumentedAttribute, which can
detect changes within the value itself.
"""
uses_objects = False
supports_population = True
def __init__(self, class_, key, callable_, dispatch,
class_manager, copy_function=None,
compare_function=None, **kwargs):
super(ScalarAttributeImpl, self).__init__(
class_,
key,
callable_, dispatch,
compare_function=compare_function,
**kwargs)
class_manager.mutable_attributes.add(key)
if copy_function is None:
raise sa_exc.ArgumentError(
"MutableScalarAttributeImpl requires a copy function")
self.copy = copy_function
def get_history(self, state, dict_, passive=PASSIVE_OFF):
if not dict_:
v = state.committed_state.get(self.key, NO_VALUE)
else:
v = dict_.get(self.key, NO_VALUE)
return History.from_scalar_attribute(self, state, v)
def check_mutable_modified(self, state, dict_):
a, u, d = self.get_history(state, dict_)
return bool(a or d)
def get(self, state, dict_, passive=PASSIVE_OFF):
if self.key not in state.mutable_dict:
ret = ScalarAttributeImpl.get(self, state, dict_, passive=passive)
if ret is not PASSIVE_NO_RESULT:
state.mutable_dict[self.key] = ret
return ret
else:
return state.mutable_dict[self.key]
def delete(self, state, dict_):
ScalarAttributeImpl.delete(self, state, dict_)
state.mutable_dict.pop(self.key)
def set(self, state, dict_, value, initiator,
passive=PASSIVE_OFF, check_old=None, pop=False):
ScalarAttributeImpl.set(self, state, dict_, value,
initiator, passive, check_old=check_old, pop=pop)
state.mutable_dict[self.key] = value
class ScalarObjectAttributeImpl(ScalarAttributeImpl):
"""represents a scalar-holding InstrumentedAttribute,
where the target object is also instrumented.
Adds events to delete/set operations.
"""
accepts_scalar_loader = False
uses_objects = True
supports_population = True
def delete(self, state, dict_):
old = self.get(state, dict_)
self.fire_remove_event(state, dict_, old, self)
del dict_[self.key]
def get_history(self, state, dict_, passive=PASSIVE_OFF):
if self.key in dict_:
return History.from_object_attribute(self, state, dict_[self.key])
else:
if passive is PASSIVE_OFF:
passive = PASSIVE_RETURN_NEVER_SET
current = self.get(state, dict_, passive=passive)
if current is PASSIVE_NO_RESULT:
return HISTORY_BLANK
else:
return History.from_object_attribute(self, state, current)
def get_all_pending(self, state, dict_):
if self.key in dict_:
current = dict_[self.key]
if current is not None:
ret = [(instance_state(current), current)]
else:
ret = [(None, None)]
if self.key in state.committed_state:
original = state.committed_state[self.key]
if original not in (NEVER_SET, PASSIVE_NO_RESULT, None) and \
original is not current:
ret.append((instance_state(original), original))
return ret
else:
return []
def set(self, state, dict_, value, initiator,
passive=PASSIVE_OFF, check_old=None, pop=False):
"""Set a value on the given InstanceState.
`initiator` is the ``InstrumentedAttribute`` that initiated the
``set()`` operation and is used to control the depth of a circular
setter operation.
"""
if initiator and initiator.parent_token is self.parent_token:
return
if self.dispatch._active_history:
old = self.get(state, dict_, passive=PASSIVE_ONLY_PERSISTENT)
else:
old = self.get(state, dict_, passive=PASSIVE_NO_FETCH)
if check_old is not None and \
old is not PASSIVE_NO_RESULT and \
check_old is not old:
if pop:
return
else:
raise ValueError(
"Object %s not associated with %s on attribute '%s'" % (
mapperutil.instance_str(check_old),
mapperutil.state_str(state),
self.key
))
value = self.fire_replace_event(state, dict_, value, old, initiator)
dict_[self.key] = value
def fire_remove_event(self, state, dict_, value, initiator):
if self.trackparent and value is not None:
self.sethasparent(instance_state(value), state, False)
for fn in self.dispatch.remove:
fn(state, value, initiator or self)
state.modified_event(dict_, self, value)
def fire_replace_event(self, state, dict_, value, previous, initiator):
if self.trackparent:
if (previous is not value and
previous is not None and
previous is not PASSIVE_NO_RESULT):
self.sethasparent(instance_state(previous), state, False)
for fn in self.dispatch.set:
value = fn(state, value, previous, initiator or self)
state.modified_event(dict_, self, previous)
if self.trackparent:
if value is not None:
self.sethasparent(instance_state(value), state, True)
return value
class CollectionAttributeImpl(AttributeImpl):
"""A collection-holding attribute that instruments changes in membership.
Only handles collections of instrumented objects.
InstrumentedCollectionAttribute holds an arbitrary, user-specified
container object (defaulting to a list) and brokers access to the
CollectionAdapter, a "view" onto that object that presents consistent bag
semantics to the orm layer independent of the user data implementation.
"""
accepts_scalar_loader = False
uses_objects = True
supports_population = True
def __init__(self, class_, key, callable_, dispatch,
typecallable=None, trackparent=False, extension=None,
copy_function=None, compare_function=None, **kwargs):
super(CollectionAttributeImpl, self).__init__(
class_,
key,
callable_, dispatch,
trackparent=trackparent,
extension=extension,
compare_function=compare_function,
**kwargs)
if copy_function is None:
copy_function = self.__copy
self.copy = copy_function
self.collection_factory = typecallable
def __copy(self, item):
return [y for y in list(collections.collection_adapter(item))]
def get_history(self, state, dict_, passive=PASSIVE_OFF):
current = self.get(state, dict_, passive=passive)
if current is PASSIVE_NO_RESULT:
return HISTORY_BLANK
else:
return History.from_collection(self, state, current)
def get_all_pending(self, state, dict_):
if self.key not in dict_:
return []
current = dict_[self.key]
current = getattr(current, '_sa_adapter')
if self.key in state.committed_state:
original = state.committed_state[self.key]
if original is not NO_VALUE:
current_states = [((c is not None) and
instance_state(c) or None, c)
for c in current]
original_states = [((c is not None) and
instance_state(c) or None, c)
for c in original]
current_set = dict(current_states)
original_set = dict(original_states)
return \
[(s, o) for s, o in current_states if s not in original_set] + \
[(s, o) for s, o in current_states if s in original_set] + \
[(s, o) for s, o in original_states if s not in current_set]
return [(instance_state(o), o) for o in current]
def fire_append_event(self, state, dict_, value, initiator):
for fn in self.dispatch.append:
value = fn(state, value, initiator or self)
state.modified_event(dict_, self, NEVER_SET, True)
if self.trackparent and value is not None:
self.sethasparent(instance_state(value), state, True)
return value
def fire_pre_remove_event(self, state, dict_, initiator):
state.modified_event(dict_, self, NEVER_SET, True)
def fire_remove_event(self, state, dict_, value, initiator):
if self.trackparent and value is not None:
self.sethasparent(instance_state(value), state, False)
for fn in self.dispatch.remove:
fn(state, value, initiator or self)
state.modified_event(dict_, self, NEVER_SET, True)
def delete(self, state, dict_):
if self.key not in dict_:
return
state.modified_event(dict_, self, NEVER_SET, True)
collection = self.get_collection(state, state.dict)
collection.clear_with_event()
# TODO: catch key errors, convert to attributeerror?
del dict_[self.key]
def initialize(self, state, dict_):
"""Initialize this attribute with an empty collection."""
_, user_data = self._initialize_collection(state)
dict_[self.key] = user_data
return user_data
def _initialize_collection(self, state):
return state.manager.initialize_collection(
self.key, state, self.collection_factory)
def append(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
if initiator and initiator.parent_token is self.parent_token:
return
collection = self.get_collection(state, dict_, passive=passive)
if collection is PASSIVE_NO_RESULT:
value = self.fire_append_event(state, dict_, value, initiator)
assert self.key not in dict_, \
"Collection was loaded during event handling."
state.get_pending(self.key).append(value)
else:
collection.append_with_event(value, initiator)
def remove(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
if initiator and initiator.parent_token is self.parent_token:
return
collection = self.get_collection(state, state.dict, passive=passive)
if collection is PASSIVE_NO_RESULT:
self.fire_remove_event(state, dict_, value, initiator)
assert self.key not in dict_, \
"Collection was loaded during event handling."
state.get_pending(self.key).remove(value)
else:
collection.remove_with_event(value, initiator)
def pop(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
try:
# TODO: better solution here would be to add
# a "popper" role to collections.py to complement
# "remover".
self.remove(state, dict_, value, initiator, passive=passive)
except (ValueError, KeyError, IndexError):
pass
def set(self, state, dict_, value, initiator,
passive=PASSIVE_OFF, pop=False):
"""Set a value on the given object.
`initiator` is the ``InstrumentedAttribute`` that initiated the
``set()`` operation and is used to control the depth of a circular
setter operation.
"""
if initiator and initiator.parent_token is self.parent_token:
return
self._set_iterable(
state, dict_, value,
lambda adapter, i: adapter.adapt_like_to_iterable(i))
def _set_iterable(self, state, dict_, iterable, adapter=None):
"""Set a collection value from an iterable of state-bearers.
``adapter`` is an optional callable invoked with a CollectionAdapter
and the iterable. Should return an iterable of state-bearing
instances suitable for appending via a CollectionAdapter. Can be used
for, e.g., adapting an incoming dictionary into an iterator of values
rather than keys.
"""
# pulling a new collection first so that an adaptation exception does
# not trigger a lazy load of the old collection.
new_collection, user_data = self._initialize_collection(state)
if adapter:
new_values = list(adapter(new_collection, iterable))
else:
new_values = list(iterable)
old = self.get(state, dict_, passive=PASSIVE_ONLY_PERSISTENT)
if old is PASSIVE_NO_RESULT:
old = self.initialize(state, dict_)
elif old is iterable:
# ignore re-assignment of the current collection, as happens
# implicitly with in-place operators (foo.collection |= other)
return
# place a copy of "old" in state.committed_state
state.modified_event(dict_, self, old, True)
old_collection = getattr(old, '_sa_adapter')
dict_[self.key] = user_data
collections.bulk_replace(new_values, old_collection, new_collection)
old_collection.unlink(old)
def set_committed_value(self, state, dict_, value):
"""Set an attribute value on the given instance and 'commit' it."""
collection, user_data = self._initialize_collection(state)
if value:
collection.append_multiple_without_event(value)
state.dict[self.key] = user_data
state.commit(dict_, [self.key])
if self.key in state.pending:
# pending items exist. issue a modified event,
# add/remove new items.
state.modified_event(dict_, self, user_data, True)
pending = state.pending.pop(self.key)
added = pending.added_items
removed = pending.deleted_items
for item in added:
collection.append_without_event(item)
for item in removed:
collection.remove_without_event(item)
return user_data
def get_collection(self, state, dict_,
user_data=None, passive=PASSIVE_OFF):
"""Retrieve the CollectionAdapter associated with the given state.
Creates a new CollectionAdapter if one does not exist.
"""
if user_data is None:
user_data = self.get(state, dict_, passive=passive)
if user_data is PASSIVE_NO_RESULT:
return user_data
return getattr(user_data, '_sa_adapter')
def backref_listeners(attribute, key, uselist):
"""Apply listeners to synchronize a two-way relationship."""
# use easily recognizable names for stack traces
parent_token = attribute.impl.parent_token
def _acceptable_key_err(child_state, initiator):
raise ValueError(
"Object %s not associated with attribute of "
"type %s" % (mapperutil.state_str(child_state),
manager_of_class(initiator.class_)[initiator.key]))
def emit_backref_from_scalar_set_event(state, child, oldchild, initiator):
if oldchild is child:
return child
if oldchild is not None and oldchild is not PASSIVE_NO_RESULT:
# With lazy=None, there's no guarantee that the full collection is
# present when updating via a backref.
old_state, old_dict = instance_state(oldchild),\
instance_dict(oldchild)
impl = old_state.manager[key].impl
impl.pop(old_state,
old_dict,
state.obj(),
initiator, passive=PASSIVE_NO_FETCH)
if child is not None:
child_state, child_dict = instance_state(child),\
instance_dict(child)
child_impl = child_state.manager[key].impl
if initiator.parent_token is not parent_token and \
initiator.parent_token is not child_impl.parent_token:
_acceptable_key_err(state, initiator)
child_impl.append(
child_state,
child_dict,
state.obj(),
initiator,
passive=PASSIVE_NO_FETCH)
return child
def emit_backref_from_collection_append_event(state, child, initiator):
child_state, child_dict = instance_state(child), \
instance_dict(child)
child_impl = child_state.manager[key].impl
if initiator.parent_token is not parent_token and \
initiator.parent_token is not child_impl.parent_token:
_acceptable_key_err(state, initiator)
child_impl.append(
child_state,
child_dict,
state.obj(),
initiator,
passive=PASSIVE_NO_FETCH)
return child
def emit_backref_from_collection_remove_event(state, child, initiator):
if child is not None:
child_state, child_dict = instance_state(child),\
instance_dict(child)
child_impl = child_state.manager[key].impl
# can't think of a path that would produce an initiator
# mismatch here, as it would require an existing collection
# mismatch.
child_impl.pop(
child_state,
child_dict,
state.obj(),
initiator,
passive=PASSIVE_NO_FETCH)
if uselist:
event.listen(attribute, "append",
emit_backref_from_collection_append_event,
retval=True, raw=True)
else:
event.listen(attribute, "set",
emit_backref_from_scalar_set_event,
retval=True, raw=True)
# TODO: need coverage in test/orm/ of remove event
event.listen(attribute, "remove",
emit_backref_from_collection_remove_event,
retval=True, raw=True)
_NO_HISTORY = util.symbol('NO_HISTORY')
_NO_STATE_SYMBOLS = frozenset([
id(PASSIVE_NO_RESULT),
id(NO_VALUE),
id(NEVER_SET)])
class History(tuple):
"""A 3-tuple of added, unchanged and deleted values,
representing the changes which have occurred on an instrumented
attribute.
Each tuple member is an iterable sequence.
"""
__slots__ = ()
added = property(itemgetter(0))
"""Return the collection of items added to the attribute (the first tuple
element)."""
unchanged = property(itemgetter(1))
"""Return the collection of items that have not changed on the attribute
(the second tuple element)."""
deleted = property(itemgetter(2))
"""Return the collection of items that have been removed from the
attribute (the third tuple element)."""
def __new__(cls, added, unchanged, deleted):
return tuple.__new__(cls, (added, unchanged, deleted))
def __nonzero__(self):
return self != HISTORY_BLANK
def empty(self):
"""Return True if this :class:`.History` has no changes
and no existing, unchanged state.
"""
return not bool(
(self.added or self.deleted)
or self.unchanged and self.unchanged != [None]
)
def sum(self):
"""Return a collection of added + unchanged + deleted."""
return (self.added or []) +\
(self.unchanged or []) +\
(self.deleted or [])
def non_deleted(self):
"""Return a collection of added + unchanged."""
return (self.added or []) +\
(self.unchanged or [])
def non_added(self):
"""Return a collection of unchanged + deleted."""
return (self.unchanged or []) +\
(self.deleted or [])
def has_changes(self):
"""Return True if this :class:`.History` has changes."""
return bool(self.added or self.deleted)
def as_state(self):
return History(
[(c is not None)
and instance_state(c) or None
for c in self.added],
[(c is not None)
and instance_state(c) or None
for c in self.unchanged],
[(c is not None)
and instance_state(c) or None
for c in self.deleted],
)
@classmethod
def from_scalar_attribute(cls, attribute, state, current):
original = state.committed_state.get(attribute.key, _NO_HISTORY)
if original is _NO_HISTORY:
if current is NO_VALUE:
return cls((), (), ())
else:
return cls((), [current], ())
# don't let ClauseElement expressions here trip things up
elif attribute.is_equal(current, original) is True:
return cls((), [current], ())
else:
# current convention on native scalars is to not
# include information
# about missing previous value in "deleted", but
# we do include None, which helps in some primary
# key situations
if id(original) in _NO_STATE_SYMBOLS:
deleted = ()
else:
deleted = [original]
if current is NO_VALUE:
return cls((), (), deleted)
else:
return cls([current], (), deleted)
@classmethod
def from_object_attribute(cls, attribute, state, current):
original = state.committed_state.get(attribute.key, _NO_HISTORY)
if original is _NO_HISTORY:
if current is NO_VALUE or current is NEVER_SET:
return cls((), (), ())
else:
return cls((), [current], ())
elif current is original:
return cls((), [current], ())
else:
# current convention on related objects is to not
# include information
# about missing previous value in "deleted", and
# to also not include None - the dependency.py rules
# ignore the None in any case.
if id(original) in _NO_STATE_SYMBOLS or original is None:
deleted = ()
else:
deleted = [original]
if current is NO_VALUE or current is NEVER_SET:
return cls((), (), deleted)
else:
return cls([current], (), deleted)
@classmethod
def from_collection(cls, attribute, state, current):
original = state.committed_state.get(attribute.key, _NO_HISTORY)
current = getattr(current, '_sa_adapter')
if original is NO_VALUE:
return cls(list(current), (), ())
elif original is _NO_HISTORY:
return cls((), list(current), ())
else:
current_states = [((c is not None) and instance_state(c) or None, c)
for c in current
]
original_states = [((c is not None) and instance_state(c) or None, c)
for c in original
]
current_set = dict(current_states)
original_set = dict(original_states)
return cls(
[o for s, o in current_states if s not in original_set],
[o for s, o in current_states if s in original_set],
[o for s, o in original_states if s not in current_set]
)
HISTORY_BLANK = History(None, None, None)
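# Illustrative sketch (added for exposition, not part of the original
# module): how a History three-tuple reports changes through its
# accessors. The values are invented example data.
_example_history = History(['new@example.com'], (), ['old@example.com'])
assert _example_history.added == ['new@example.com']
assert _example_history.deleted == ['old@example.com']
assert _example_history.has_changes()
assert _example_history.sum() == ['new@example.com', 'old@example.com']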
def get_history(obj, key, passive=PASSIVE_OFF):
"""Return a :class:`.History` record for the given object
and attribute key.
:param obj: an object whose class is instrumented by the
attributes package.
:param key: string attribute name.
:param passive: indicates if the attribute should be
loaded from the database if not already present (:attr:`.PASSIVE_NO_FETCH`), and
if the attribute should be not initialized to a blank value otherwise
(:attr:`.PASSIVE_NO_INITIALIZE`). Default is :attr:`PASSIVE_OFF`.
"""
if passive is True:
util.warn_deprecated("Passing True for 'passive' is deprecated. "
"Use attributes.PASSIVE_NO_INITIALIZE")
passive = PASSIVE_NO_INITIALIZE
elif passive is False:
util.warn_deprecated("Passing False for 'passive' is "
"deprecated. Use attributes.PASSIVE_OFF")
passive = PASSIVE_OFF
return get_state_history(instance_state(obj), key, passive)
def get_state_history(state, key, passive=PASSIVE_OFF):
return state.get_history(key, passive)
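# Illustrative sketch (added for exposition): get_history() on a pending
# object reports a newly set value under ``.added``. The mapping below is
# an invented example; it is left commented out because importing the
# declarative extension inside this module would be circular.
#
# from sqlalchemy import Column, Integer, String
# from sqlalchemy.ext.declarative import declarative_base
#
# Base = declarative_base()
#
# class User(Base):
#     __tablename__ = 'user'
#     id = Column(Integer, primary_key=True)
#     name = Column(String)
#
# u = User(name='ed')
# assert get_history(u, 'name').added == ['ed']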
def has_parent(cls, obj, key, optimistic=False):
"""TODO"""
manager = manager_of_class(cls)
state = instance_state(obj)
return manager.has_parent(state, key, optimistic)
def register_attribute(class_, key, **kw):
comparator = kw.pop('comparator', None)
parententity = kw.pop('parententity', None)
doc = kw.pop('doc', None)
desc = register_descriptor(class_, key,
comparator, parententity, doc=doc)
register_attribute_impl(class_, key, **kw)
return desc
def register_attribute_impl(class_, key,
uselist=False, callable_=None,
useobject=False, mutable_scalars=False,
impl_class=None, backref=None, **kw):
manager = manager_of_class(class_)
if uselist:
factory = kw.pop('typecallable', None)
typecallable = manager.instrument_collection_class(
key, factory or list)
else:
typecallable = kw.pop('typecallable', None)
dispatch = manager[key].dispatch
if impl_class:
impl = impl_class(class_, key, typecallable, dispatch, **kw)
elif uselist:
impl = CollectionAttributeImpl(class_, key, callable_, dispatch,
typecallable=typecallable, **kw)
elif useobject:
impl = ScalarObjectAttributeImpl(class_, key, callable_,
dispatch,**kw)
elif mutable_scalars:
impl = MutableScalarAttributeImpl(class_, key, callable_, dispatch,
class_manager=manager, **kw)
else:
impl = ScalarAttributeImpl(class_, key, callable_, dispatch, **kw)
manager[key].impl = impl
if backref:
backref_listeners(manager[key], backref, uselist)
manager.post_configure_attribute(key)
return manager[key]
def register_descriptor(class_, key, comparator=None,
parententity=None, doc=None):
manager = manager_of_class(class_)
descriptor = InstrumentedAttribute(class_, key, comparator=comparator,
parententity=parententity)
descriptor.__doc__ = doc
manager.instrument_attribute(key, descriptor)
return descriptor
def unregister_attribute(class_, key):
manager_of_class(class_).uninstrument_attribute(key)
def init_collection(obj, key):
"""Initialize a collection attribute and return the collection adapter.
This function is used to provide direct access to collection internals
for a previously unloaded attribute. e.g.::
collection_adapter = init_collection(someobject, 'elements')
for elem in values:
collection_adapter.append_without_event(elem)
For an easier way to do the above, see
:func:`~sqlalchemy.orm.attributes.set_committed_value`.
obj is an instrumented object instance. An InstanceState
is accepted directly for backwards compatibility but
this usage is deprecated.
"""
state = instance_state(obj)
dict_ = state.dict
return init_state_collection(state, dict_, key)
def init_state_collection(state, dict_, key):
"""Initialize a collection attribute and return the collection adapter."""
attr = state.manager[key].impl
user_data = attr.initialize(state, dict_)
return attr.get_collection(state, dict_, user_data)
def set_committed_value(instance, key, value):
"""Set the value of an attribute with no history events.
Cancels any previous history present. The value should be
a scalar value for scalar-holding attributes, or
an iterable for any collection-holding attribute.
This is the same underlying method used when a lazy loader
fires off and loads additional data from the database.
In particular, this method can be used by application code
which has loaded additional attributes or collections through
separate queries, which can then be attached to an instance
as though it were part of its original loaded state.
"""
state, dict_ = instance_state(instance), instance_dict(instance)
state.manager[key].impl.set_committed_value(state, dict_, value)
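# Illustrative sketch: attaching the results of a separate query as though
# they were part of the original load ('user', 'Address' and the session
# are example names, assumed to exist elsewhere):
#
# addresses = session.query(Address).filter_by(user_id=user.id).all()
# set_committed_value(user, 'addresses', addresses)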
def set_attribute(instance, key, value):
"""Set the value of an attribute, firing history events.
This function may be used regardless of instrumentation
applied directly to the class, i.e. no descriptors are required.
Custom attribute management schemes will need to use
this method to establish attribute state as understood
by SQLAlchemy.
"""
state, dict_ = instance_state(instance), instance_dict(instance)
state.manager[key].impl.set(state, dict_, value, None)
def get_attribute(instance, key):
"""Get the value of an attribute, firing any callables required.
This function may be used regardless of instrumentation
applied directly to the class, i.e. no descriptors are required.
Custom attribute management schemes will need to use
this method to access attribute state as understood
by SQLAlchemy.
"""
state, dict_ = instance_state(instance), instance_dict(instance)
return state.manager[key].impl.get(state, dict_)
def del_attribute(instance, key):
"""Delete the value of an attribute, firing history events.
This function may be used regardless of instrumentation
applied directly to the class, i.e. no descriptors are required.
Custom attribute management schemes will need to use
this method to establish attribute state as understood
by SQLAlchemy.
"""
state, dict_ = instance_state(instance), instance_dict(instance)
state.manager[key].impl.delete(state, dict_)
def flag_modified(instance, key):
"""Mark an attribute on an instance as 'modified'.
This sets the 'modified' flag on the instance and
establishes an unconditional change event for the given attribute.
"""
state, dict_ = instance_state(instance), instance_dict(instance)
impl = state.manager[key].impl
state.modified_event(dict_, impl, NO_VALUE)
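# Illustrative sketch: flag_modified() covers in-place mutations that emit
# no attribute event ('document' and 'tags' are example names):
#
# document.tags.append('draft')      # in-place change, no event fired
# flag_modified(document, 'tags')    # attribute now participates in flush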
| gpl-3.0 |
caesar2164/edx-platform | common/lib/xmodule/xmodule/tests/test_bulk_assertions.py | 173 | 5627 | import ddt
import itertools
from xmodule.tests import BulkAssertionTest, BulkAssertionError
STATIC_PASSING_ASSERTIONS = (
('assertTrue', True),
('assertFalse', False),
('assertIs', 1, 1),
('assertEqual', 1, 1),
('assertEquals', 1, 1),
('assertIsNot', 1, 2),
('assertIsNone', None),
('assertIsNotNone', 1),
('assertIn', 1, (1, 2, 3)),
('assertNotIn', 5, (1, 2, 3)),
('assertIsInstance', 1, int),
('assertNotIsInstance', '1', int),
('assertItemsEqual', [1, 2, 3], [3, 2, 1])
)
STATIC_FAILING_ASSERTIONS = (
('assertTrue', False),
('assertFalse', True),
('assertIs', 1, 2),
('assertEqual', 1, 2),
('assertEquals', 1, 2),
('assertIsNot', 1, 1),
('assertIsNone', 1),
('assertIsNotNone', None),
('assertIn', 5, (1, 2, 3)),
('assertNotIn', 1, (1, 2, 3)),
('assertIsInstance', '1', int),
('assertNotIsInstance', 1, int),
('assertItemsEqual', [1, 1, 1], [1, 1])
)
CONTEXT_PASSING_ASSERTIONS = (
('assertRaises', KeyError, {}.__getitem__, '1'),
('assertRaisesRegexp', KeyError, "1", {}.__getitem__, '1'),
)
CONTEXT_FAILING_ASSERTIONS = (
('assertRaises', ValueError, lambda: None),
('assertRaisesRegexp', KeyError, "2", {}.__getitem__, '1'),
)
@ddt.ddt
class TestBulkAssertionTestCase(BulkAssertionTest):
# We have to use assertion methods from the base UnitTest class,
# so we make a number of super calls that skip BulkAssertionTest.
# pylint: disable=bad-super-call
def _run_assertion(self, assertion_tuple):
"""
Run the supplied tuple of (assertion, *args) as a method on this class.
"""
assertion, args = assertion_tuple[0], assertion_tuple[1:]
getattr(self, assertion)(*args)
def _raw_assert(self, assertion_name, *args, **kwargs):
"""
Run an un-modified assertion.
"""
# Use super(BulkAssertionTest) to make sure we get unadulterated assertions
return getattr(super(BulkAssertionTest, self), 'assert' + assertion_name)(*args, **kwargs)
@ddt.data(*(STATIC_PASSING_ASSERTIONS + CONTEXT_PASSING_ASSERTIONS))
def test_passing_asserts_passthrough(self, assertion_tuple):
self._run_assertion(assertion_tuple)
@ddt.data(*(STATIC_FAILING_ASSERTIONS + CONTEXT_FAILING_ASSERTIONS))
def test_failing_asserts_passthrough(self, assertion_tuple):
with self._raw_assert('Raises', AssertionError) as context:
self._run_assertion(assertion_tuple)
self._raw_assert('NotIsInstance', context.exception, BulkAssertionError)
@ddt.data(*CONTEXT_PASSING_ASSERTIONS)
@ddt.unpack
def test_passing_context_assertion_passthrough(self, assertion, *args):
assertion_args = []
args = list(args)
exception = args.pop(0)
while not callable(args[0]):
assertion_args.append(args.pop(0))
function = args.pop(0)
with getattr(self, assertion)(exception, *assertion_args):
function(*args)
@ddt.data(*CONTEXT_FAILING_ASSERTIONS)
@ddt.unpack
def test_failing_context_assertion_passthrough(self, assertion, *args):
assertion_args = []
args = list(args)
exception = args.pop(0)
while not callable(args[0]):
assertion_args.append(args.pop(0))
function = args.pop(0)
with self._raw_assert('Raises', AssertionError) as context:
with getattr(self, assertion)(exception, *assertion_args):
function(*args)
self._raw_assert('NotIsInstance', context.exception, BulkAssertionError)
@ddt.data(*list(itertools.product(
CONTEXT_PASSING_ASSERTIONS,
CONTEXT_FAILING_ASSERTIONS,
CONTEXT_FAILING_ASSERTIONS
)))
@ddt.unpack
def test_bulk_assert(self, passing_assertion, failing_assertion1, failing_assertion2):
contextmanager = self.bulk_assertions()
contextmanager.__enter__()
self._run_assertion(passing_assertion)
self._run_assertion(failing_assertion1)
self._run_assertion(failing_assertion2)
with self._raw_assert('Raises', BulkAssertionError) as context:
contextmanager.__exit__(None, None, None)
self._raw_assert('Equals', len(context.exception.errors), 2)
@ddt.data(*list(itertools.product(
CONTEXT_FAILING_ASSERTIONS
)))
@ddt.unpack
def test_nested_bulk_asserts(self, failing_assertion):
with self._raw_assert('Raises', BulkAssertionError) as context:
with self.bulk_assertions():
self._run_assertion(failing_assertion)
with self.bulk_assertions():
self._run_assertion(failing_assertion)
self._run_assertion(failing_assertion)
self._raw_assert('Equal', len(context.exception.errors), 3)
@ddt.data(*list(itertools.product(
CONTEXT_PASSING_ASSERTIONS,
CONTEXT_FAILING_ASSERTIONS,
CONTEXT_FAILING_ASSERTIONS
)))
@ddt.unpack
def test_bulk_assert_closed(self, passing_assertion, failing_assertion1, failing_assertion2):
with self._raw_assert('Raises', BulkAssertionError) as context:
with self.bulk_assertions():
self._run_assertion(passing_assertion)
self._run_assertion(failing_assertion1)
self._raw_assert('Equals', len(context.exception.errors), 1)
with self._raw_assert('Raises', AssertionError) as context:
self._run_assertion(failing_assertion2)
self._raw_assert('NotIsInstance', context.exception, BulkAssertionError)
| agpl-3.0 |
konstruktoid/ansible-upstream | lib/ansible/plugins/cliconf/ios.py | 3 | 10305 | #
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
import re
import time
import json
from itertools import chain
from ansible.module_utils._text import to_text
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.common.config import NetworkConfig, dumps
from ansible.module_utils.network.common.utils import to_list
from ansible.plugins.cliconf import CliconfBase, enable_mode
class Cliconf(CliconfBase):
@enable_mode
def get_config(self, source='running', filter=None, format='text'):
if source not in ('running', 'startup'):
return self.invalid_params("fetching configuration from %s is not supported" % source)
if not filter:
filter = []
if source == 'running':
cmd = 'show running-config '
else:
cmd = 'show startup-config '
cmd += ' '.join(to_list(filter))
cmd = cmd.strip()
return self.send_command(cmd)
def get_diff(self, candidate=None, running=None, match='line', diff_ignore_lines=None, path=None, replace='line'):
"""
Generate a diff between the candidate and running configuration. If the
remote host supports onbox diff capabilities (supports_onbox_diff), the
candidate and running configurations are not required to be passed as
arguments. If onbox diff capability is not supported, the candidate
argument is mandatory and the running argument is optional.
:param candidate: The configuration which is expected to be present on remote host.
:param running: The base configuration which is used to generate diff.
:param match: Instructs how to match the candidate configuration with current device configuration
Valid values are 'line', 'strict', 'exact', 'none'.
'line' - commands are matched line by line
'strict' - command lines are matched with respect to position
'exact' - command lines must be an equal match
'none' - will not compare the candidate configuration with
the running configuration on the remote device
:param diff_ignore_lines: Use this argument to specify one or more lines that should be
ignored during the diff. This is used for lines in the configuration
that are automatically updated by the system. This argument takes
a list of regular expressions or exact line matches.
:param path: The ordered set of parents that uniquely identify the section or hierarchy
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
:param replace: Instructs on the way to perform the configuration on the device.
If the replace argument is set to I(line) then the modified lines are
pushed to the device in configuration mode. If the replace argument is
set to I(block) then the entire command block is pushed to the device in
configuration mode if any line is not correct.
:return: Configuration diff in json format.
{
'config_diff': '',
'banner_diff': ''
}
"""
diff = {}
device_operations = self.get_device_operations()
if candidate is None and not device_operations['supports_onbox_diff']:
raise ValueError('candidate configuration is required to generate diff')
# prepare candidate configuration
candidate_obj = NetworkConfig(indent=1)
want_src, want_banners = self._extract_banners(candidate)
candidate_obj.load(want_src)
if running and match != 'none':
# running configuration
have_src, have_banners = self._extract_banners(running)
running_obj = NetworkConfig(indent=1, contents=have_src, ignore_lines=diff_ignore_lines)
configdiffobjs = candidate_obj.difference(running_obj, path=path, match=match, replace=replace)
else:
configdiffobjs = candidate_obj.items
have_banners = {}
configdiff = dumps(configdiffobjs, 'commands') if configdiffobjs else ''
diff['config_diff'] = configdiff if configdiffobjs else {}
banners = self._diff_banners(want_banners, have_banners)
diff['banner_diff'] = banners if banners else {}
return json.dumps(diff)
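# Illustrative sketch (not part of the plugin): the shape of a get_diff()
# call and its JSON result. 'conn' stands for a connected Cliconf
# instance and the configurations are invented examples.
#
# candidate = 'hostname SW-A\ninterface GigabitEthernet0/1\n description up'
# result = json.loads(conn.get_diff(candidate=candidate,
#                                   running='hostname SW-B',
#                                   match='line', replace='line'))
# result['config_diff']   # command lines to push, empty when no change
# result['banner_diff']   # e.g. {'banner motd': 'new text'} or {}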
@enable_mode
def edit_config(self, candidate, check_mode=False, replace=None):
if not candidate:
raise ValueError('must provide a candidate config to load')
if check_mode not in (True, False):
raise ValueError('`check_mode` must be a bool, got %s' % check_mode)
options = self.get_option_values()
if replace and replace not in options['replace']:
raise ValueError('`replace` value %s is invalid, valid values are %s' % (replace, options['replace']))
results = []
if not check_mode:
for line in chain(['configure terminal'], to_list(candidate)):
if not isinstance(line, collections.Mapping):
line = {'command': line}
cmd = line['command']
if cmd != 'end' and cmd[0] != '!':
results.append(self.send_command(**line))
results.append(self.send_command('end'))
return results[1:-1]
def get(self, command, prompt=None, answer=None, sendonly=False):
return self.send_command(command, prompt=prompt, answer=answer, sendonly=sendonly)
def get_device_info(self):
device_info = {}
device_info['network_os'] = 'ios'
reply = self.get('show version')
data = to_text(reply, errors='surrogate_or_strict').strip()
match = re.search(r'Version (\S+)', data)
if match:
device_info['network_os_version'] = match.group(1).strip(',')
match = re.search(r'^Cisco (.+) \(revision', data, re.M)
if match:
device_info['network_os_model'] = match.group(1)
match = re.search(r'^(.+) uptime', data, re.M)
if match:
device_info['network_os_hostname'] = match.group(1)
return device_info
def get_device_operations(self):
return {
'supports_replace': True,
'supports_commit': False,
'supports_rollback': False,
'supports_defaults': True,
'supports_onbox_diff': False,
'supports_commit_comment': False,
'supports_multiline_delimiter': False,
'support_match': True,
'support_diff_ignore_lines': True,
'supports_generate_diff': True,
}
def get_option_values(self):
return {
'format': ['text'],
'match': ['line', 'strict', 'exact', 'none'],
'replace': ['line', 'block']
}
def get_capabilities(self):
result = dict()
result['rpc'] = self.get_base_rpc() + ['edit_banner']
result['network_api'] = 'cliconf'
result['device_info'] = self.get_device_info()
result['device_operations'] = self.get_device_operations()
result.update(self.get_option_values())
return json.dumps(result)
def edit_banner(self, banners, multiline_delimiter="@", check_mode=False):
"""
Edit banner on remote device
:param banners: Banners to be loaded in json format
:param multiline_delimiter: Line delimiter for banner
:param check_mode: Boolean value that indicates if the device candidate
configuration should be pushed in the running configuration or discarded.
:return: Returns response of executing the configuration command received
from remote host
"""
banners_obj = json.loads(banners)
results = []
if not check_mode:
for key, value in iteritems(banners_obj):
key += ' %s' % multiline_delimiter
for cmd in ['config terminal', key, value, multiline_delimiter, 'end']:
obj = {'command': cmd, 'sendonly': True}
results.append(self.send_command(**obj))
time.sleep(0.1)
results.append(self.send_command('\n'))
return results[1:-1]
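# Illustrative sketch: edit_banner() takes a JSON document mapping banner
# commands to banner text (values below are examples):
#
# conn.edit_banner(json.dumps({'banner motd': 'Authorized access only'}),
#                  multiline_delimiter='@')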
def _extract_banners(self, config):
banners = {}
banner_cmds = re.findall(r'^banner (\w+)', config, re.M)
for cmd in banner_cmds:
regex = r'banner %s \^C(.+?)(?=\^C)' % cmd
match = re.search(regex, config, re.S)
if match:
key = 'banner %s' % cmd
banners[key] = match.group(1).strip()
for cmd in banner_cmds:
regex = r'banner %s \^C(.+?)(?=\^C)' % cmd
match = re.search(regex, config, re.S)
if match:
config = config.replace(str(match.group(1)), '')
config = re.sub(r'banner \w+ \^C\^C', '!! banner removed', config)
return config, banners
def _diff_banners(self, want, have):
candidate = {}
for key, value in iteritems(want):
if value != have.get(key):
candidate[key] = value
return candidate
| gpl-3.0 |
akrherz/iem | scripts/coop/month_precip.py | 1 | 2924 | """
Generate the IEMNWSMPR text product of month-to-date precipitation totals for NWS COOP stations
"""
import datetime
import subprocess
import os
from pyiem.network import Table as NetworkTable
from pyiem.util import get_dbconn, logger
LOG = logger()
def main():
"""Go Main Go"""
nt = NetworkTable("IA_COOP")
iem_pgconn = get_dbconn("iem", user="nobody")
icursor = iem_pgconn.cursor()
coop_pgconn = get_dbconn("coop", user="nobody")
ccursor = coop_pgconn.cursor()
o = open("IEMNWSMPR.txt", "w")
o.write("IEMNWSMPR\n")
o.write("IOWA ENVIRONMENTAL MESONET\n")
o.write(" NWS COOP STATION MONTH PRECIPITATION TOTALS\n")
o.write(" AS CALCULATED ON THE IEM SERVER ...NOTE THE OBS COUNT...\n")
now = datetime.datetime.now()
# Now we load climatology
mrain = {}
ccursor.execute(
"""
select station, sum(precip) as rain from climate WHERE
extract(month from valid) = %s and extract(day from valid) <= %s
GROUP by station
"""
% (now.month, now.day)
)
for row in ccursor:
mrain[row[0]] = row[1]
o.write(
(" VALID FOR MONTH OF: %s\n\n") % (now.strftime("%d %B %Y").upper(),)
)
o.write(
"%-6s%-24.24s%9s%10s%11s%10s\n"
% ("ID", "STATION", "REPORTS", "PREC (IN)", "CLIMO2DATE", "DIFF")
)
icursor.execute(
"""
SELECT id, count(id) as cnt,
sum(CASE WHEN pday >= 0 THEN pday ELSE 0 END) as prectot
from summary_%s s JOIN stations t on (t.iemid = s.iemid)
WHERE date_part('month', day) =
date_part('month', CURRENT_TIMESTAMP::date)
and pday >= 0 and t.network = 'IA_COOP' GROUP by id
"""
% (now.year,)
)
d = {}
for row in icursor:
thisStation = row[0]
thisPrec = row[2]
thisCount = row[1]
if thisStation in nt.sts:
climate_site = nt.sts[thisStation]["climate_site"]
if climate_site not in mrain:
LOG.debug("climate_site has no data: %s", climate_site)
continue
d[thisStation] = {"prectot": thisPrec, "cnt": thisCount}
d[thisStation]["name"] = nt.sts[thisStation]["name"]
d[thisStation]["crain"] = mrain[
nt.sts[thisStation]["climate_site"]
]
keys = list(d.keys())
keys.sort()
for k in keys:
o.write(
("%-6s%-24.24s%9.0f%10.2f%11.2f%10.2f\n")
% (
k,
d[k]["name"],
d[k]["cnt"],
d[k]["prectot"],
d[k]["crain"],
d[k]["prectot"] - d[k]["crain"],
)
)
o.write(".END\n")
o.close()
subprocess.call(
(
"pqinsert -p 'plot c 000000000000 "
"text/IEMNWSMPR.txt bogus txt' IEMNWSMPR.txt"
),
shell=True,
)
os.unlink("IEMNWSMPR.txt")
if __name__ == "__main__":
main()
| mit |
Edraak/edraak-platform | openedx/core/djangoapps/profile_images/views.py | 13 | 8494 | """
This module implements the upload and remove endpoints of the profile image api.
"""
import datetime
import itertools
import logging
from contextlib import closing
from pytz import UTC
from django.utils.translation import ugettext as _
from rest_framework import permissions, status
from rest_framework.parsers import FormParser, MultiPartParser
from rest_framework.response import Response
from rest_framework.views import APIView
from six import text_type
from openedx.core.djangoapps.user_api.accounts.image_helpers import get_profile_image_names, set_has_profile_image
from openedx.core.djangoapps.user_api.errors import UserNotFound
from openedx.core.lib.api.authentication import (
OAuth2AuthenticationAllowInactiveUser,
SessionAuthenticationAllowInactiveUser
)
from openedx.core.lib.api.parsers import TypedFileUploadParser
from openedx.core.lib.api.permissions import IsUserInUrl
from openedx.core.lib.api.view_utils import DeveloperErrorViewMixin
from .exceptions import ImageValidationError
from .images import IMAGE_TYPES, create_profile_images, remove_profile_images, validate_uploaded_image
log = logging.getLogger(__name__)
LOG_MESSAGE_CREATE = 'Generated and uploaded images %(image_names)s for user %(user_id)s'
LOG_MESSAGE_DELETE = 'Deleted images %(image_names)s for user %(user_id)s'
def _make_upload_dt():
"""
Generate a server-side timestamp for the upload. This is in a separate
function so its behavior can be overridden in tests.
"""
return datetime.datetime.utcnow().replace(tzinfo=UTC)
class ProfileImageView(DeveloperErrorViewMixin, APIView):
"""
**Use Cases**
Add or remove profile images associated with user accounts.
The requesting user must be signed in. Users can only add profile
images to their own account. Users with staff access can remove
profile images for other user accounts. All other users can remove
only their own profile images.
**Example Requests**
POST /api/user/v1/accounts/{username}/image
DELETE /api/user/v1/accounts/{username}/image
**Example POST Responses**
When the requesting user attempts to upload an image for their own
account, the request returns one of the following responses:
* If the upload could not be performed, the request returns an HTTP 400
"Bad Request" response with information about why the request failed.
* If the upload is successful, the request returns an HTTP 204 "No
Content" response with no additional content.
If the requesting user tries to upload an image for a different
user, the request returns one of the following responses:
* If no user matches the "username" parameter, the request returns an
HTTP 404 "Not Found" response.
* If the user whose profile image is being uploaded exists, but the
requesting user does not have staff access, the request returns an
HTTP 404 "Not Found" response.
* If the specified user exists, and the requesting user has staff
access, the request returns an HTTP 403 "Forbidden" response.
**Example DELETE Responses**
When the requesting user attempts to remove the profile image for
their own account, the request returns one of the following
responses:
* If the image could not be removed, the request returns an HTTP 400
"Bad Request" response with information about why the request failed.
* If the request successfully removes the image, the request returns
an HTTP 204 "No Content" response with no additional content.
When the requesting user tries to remove the profile image for a
different user, the view will return one of the following responses:
* If the requesting user has staff access, and the "username" parameter
matches a user, the profile image for the specified user is deleted,
and the request returns an HTTP 204 "No Content" response with no
additional content.
* If the requesting user has staff access, but no user is matched by
the "username" parameter, the request returns an HTTP 404 "Not Found"
response.
* If the requesting user does not have staff access, the request
returns an HTTP 404 "Not Found" response, regardless of whether
the user exists or not.
"""
parser_classes = (MultiPartParser, FormParser, TypedFileUploadParser)
authentication_classes = (OAuth2AuthenticationAllowInactiveUser, SessionAuthenticationAllowInactiveUser)
permission_classes = (permissions.IsAuthenticated, IsUserInUrl)
upload_media_types = set(itertools.chain(*(image_type.mimetypes for image_type in IMAGE_TYPES.values())))
def post(self, request, username):
"""
POST /api/user/v1/accounts/{username}/image
"""
# validate request: ensure an image file was actually sent
if 'file' not in request.FILES:
return Response(
{
"developer_message": u"No file provided for profile image",
"user_message": _(u"No file provided for profile image"),
},
status=status.HTTP_400_BAD_REQUEST
)
# process the upload.
uploaded_file = request.FILES['file']
# no matter what happens, delete the temporary file when we're done
with closing(uploaded_file):
# image file validation.
try:
validate_uploaded_image(uploaded_file)
except ImageValidationError as error:
return Response(
{"developer_message": text_type(error), "user_message": error.user_message},
status=status.HTTP_400_BAD_REQUEST,
)
# generate profile pic and thumbnails and store them
profile_image_names = get_profile_image_names(username)
create_profile_images(uploaded_file, profile_image_names)
# update the user account to reflect that a profile image is available.
set_has_profile_image(username, True, _make_upload_dt())
log.info(
LOG_MESSAGE_CREATE,
{'image_names': profile_image_names.values(), 'user_id': request.user.id}
)
# send client response.
return Response(status=status.HTTP_204_NO_CONTENT)
def delete(self, request, username):
"""
DELETE /api/user/v1/accounts/{username}/image
"""
try:
# update the user account to reflect that the images were removed.
set_has_profile_image(username, False)
# remove physical files from storage.
profile_image_names = get_profile_image_names(username)
remove_profile_images(profile_image_names)
log.info(
LOG_MESSAGE_DELETE,
{'image_names': profile_image_names.values(), 'user_id': request.user.id}
)
except UserNotFound:
return Response(status=status.HTTP_404_NOT_FOUND)
# send client response.
return Response(status=status.HTTP_204_NO_CONTENT)
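# Illustrative sketch (added for exposition): exercising the POST endpoint
# from an external client. Host, username, token and file name are
# placeholder values, not part of this module.
#
# import requests
# with open('face.jpg', 'rb') as fh:
#     resp = requests.post(
#         'https://lms.example.com/api/user/v1/accounts/jdoe/image',
#         files={'file': ('face.jpg', fh, 'image/jpeg')},
#         headers={'Authorization': 'Bearer <access_token>'},
#     )
# assert resp.status_code == 204  # success is "204 No Content"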
class ProfileImageUploadView(APIView):
"""
**DEPRECATION WARNING**
/api/profile_images/v1/{username}/upload is deprecated.
All requests should now be sent to
/api/user/v1/accounts/{username}/image
"""
parser_classes = ProfileImageView.parser_classes
authentication_classes = ProfileImageView.authentication_classes
permission_classes = ProfileImageView.permission_classes
def post(self, request, username):
"""
POST /api/profile_images/v1/{username}/upload
"""
return ProfileImageView().post(request, username)
class ProfileImageRemoveView(APIView):
"""
**DEPRECATION WARNING**
/api/profile_images/v1/{username}/remove is deprecated.
This endpoint's POST is replaced by the DELETE method at
/api/user/v1/accounts/{username}/image.
"""
authentication_classes = ProfileImageView.authentication_classes
permission_classes = ProfileImageView.permission_classes
def post(self, request, username):
"""
POST /api/profile_images/v1/{username}/remove
"""
return ProfileImageView().delete(request, username)
| agpl-3.0 |
sagangwee/sagangwee.github.io | build/pygments/build/lib.linux-i686-2.7/pygments/lexer.py | 73 | 30839 | # -*- coding: utf-8 -*-
"""
pygments.lexer
~~~~~~~~~~~~~~
Base lexer classes.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import re
import sys
import time
import itertools
from pygments.filter import apply_filters, Filter
from pygments.filters import get_filter_by_name
from pygments.token import Error, Text, Other, _TokenType
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
make_analysator, text_type, add_metaclass, iteritems, Future, guess_decode
from pygments.regexopt import regex_opt
__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this',
'default', 'words']
_encoding_map = [(b'\xef\xbb\xbf', 'utf-8'),
(b'\xff\xfe\0\0', 'utf-32'),
(b'\0\0\xfe\xff', 'utf-32be'),
(b'\xff\xfe', 'utf-16'),
(b'\xfe\xff', 'utf-16be')]
_default_analyse = staticmethod(lambda x: 0.0)
class LexerMeta(type):
"""
This metaclass automagically converts ``analyse_text`` methods into
static methods which always return float values.
"""
def __new__(cls, name, bases, d):
if 'analyse_text' in d:
d['analyse_text'] = make_analysator(d['analyse_text'])
return type.__new__(cls, name, bases, d)
@add_metaclass(LexerMeta)
class Lexer(object):
"""
Lexer for a specific language.
Basic options recognized:
``stripnl``
Strip leading and trailing newlines from the input (default: True).
``stripall``
Strip all leading and trailing whitespace from the input
(default: False).
``ensurenl``
Make sure that the input ends with a newline (default: True). This
is required for some lexers that consume input linewise.
.. versionadded:: 1.3
``tabsize``
If given and greater than 0, expand tabs in the input (default: 0).
``encoding``
If given, must be an encoding name. This encoding will be used to
convert the input string to Unicode, if it is not already a Unicode
string (default: ``'guess'``, which uses a simple UTF-8 / Locale /
Latin1 detection. Can also be ``'chardet'`` to use the chardet
library, if it is installed.
``inencoding``
Overrides the ``encoding`` if given.
"""
#: Name of the lexer
name = None
#: Shortcuts for the lexer
aliases = []
#: File name globs
filenames = []
#: Secondary file name globs
alias_filenames = []
#: MIME types
mimetypes = []
#: Priority, should multiple lexers match and no content is provided
priority = 0
def __init__(self, **options):
self.options = options
self.stripnl = get_bool_opt(options, 'stripnl', True)
self.stripall = get_bool_opt(options, 'stripall', False)
self.ensurenl = get_bool_opt(options, 'ensurenl', True)
self.tabsize = get_int_opt(options, 'tabsize', 0)
self.encoding = options.get('encoding', 'guess')
self.encoding = options.get('inencoding') or self.encoding
self.filters = []
for filter_ in get_list_opt(options, 'filters', ()):
self.add_filter(filter_)
def __repr__(self):
if self.options:
return '<pygments.lexers.%s with %r>' % (self.__class__.__name__,
self.options)
else:
return '<pygments.lexers.%s>' % self.__class__.__name__
def add_filter(self, filter_, **options):
"""
Add a new stream filter to this lexer.
"""
if not isinstance(filter_, Filter):
filter_ = get_filter_by_name(filter_, **options)
self.filters.append(filter_)
def analyse_text(text):
"""
Has to return a float between ``0`` and ``1`` that indicates
if a lexer wants to highlight this text. Used by ``guess_lexer``.
If this method returns ``0`` it won't highlight it in any case, if
it returns ``1`` highlighting with this lexer is guaranteed.
The `LexerMeta` metaclass automatically wraps this function so
that it works like a static method (no ``self`` or ``cls``
parameter) and the return value is automatically converted to
`float`. If the return value is an object that is boolean `False`
it's the same as if the return value was ``0.0``.
"""
def get_tokens(self, text, unfiltered=False):
"""
Return an iterable of (tokentype, value) pairs generated from
`text`. If `unfiltered` is set to `True`, the filtering mechanism
is bypassed even if filters are defined.
Also preprocess the text, i.e. expand tabs and strip it if
wanted and applies registered filters.
"""
if not isinstance(text, text_type):
if self.encoding == 'guess':
text, _ = guess_decode(text)
elif self.encoding == 'chardet':
try:
import chardet
except ImportError:
raise ImportError('To enable chardet encoding guessing, '
'please install the chardet library '
'from http://chardet.feedparser.org/')
# check for BOM first
decoded = None
for bom, encoding in _encoding_map:
if text.startswith(bom):
decoded = text[len(bom):].decode(encoding, 'replace')
break
# no BOM found, so use chardet
if decoded is None:
enc = chardet.detect(text[:1024]) # Guess using first 1KB
decoded = text.decode(enc.get('encoding') or 'utf-8',
'replace')
text = decoded
else:
text = text.decode(self.encoding)
if text.startswith(u'\ufeff'):
text = text[len(u'\ufeff'):]
else:
if text.startswith(u'\ufeff'):
text = text[len(u'\ufeff'):]
# text now *is* a unicode string
text = text.replace('\r\n', '\n')
text = text.replace('\r', '\n')
if self.stripall:
text = text.strip()
elif self.stripnl:
text = text.strip('\n')
if self.tabsize > 0:
text = text.expandtabs(self.tabsize)
if self.ensurenl and not text.endswith('\n'):
text += '\n'
def streamer():
for i, t, v in self.get_tokens_unprocessed(text):
yield t, v
stream = streamer()
if not unfiltered:
stream = apply_filters(stream, self.filters, self)
return stream
def get_tokens_unprocessed(self, text):
"""
Return an iterable of (index, tokentype, value) pairs where "index"
is the starting position of the token within the input text.
In subclasses, implement this method as a generator to
maximize effectiveness.
"""
raise NotImplementedError
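# Illustrative sketch: the options documented above in action. PythonLexer
# is a real pygments lexer; the option values are examples (commented out
# because importing pygments.lexers here would be circular).
#
# from pygments.lexers import PythonLexer
# lx = PythonLexer(stripall=True, tabsize=4, encoding='utf-8')
# for ttype, value in lx.get_tokens(b'print("hi")\n'):
#     print(ttype, repr(value))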
class DelegatingLexer(Lexer):
"""
This lexer takes two lexer as arguments. A root lexer and
a language lexer. First everything is scanned using the language
lexer, afterwards all ``Other`` tokens are lexed using the root
lexer.
The lexers from the ``template`` lexer package use this base lexer.
"""
def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options):
self.root_lexer = _root_lexer(**options)
self.language_lexer = _language_lexer(**options)
self.needle = _needle
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
buffered = ''
insertions = []
lng_buffer = []
for i, t, v in self.language_lexer.get_tokens_unprocessed(text):
if t is self.needle:
if lng_buffer:
insertions.append((len(buffered), lng_buffer))
lng_buffer = []
buffered += v
else:
lng_buffer.append((i, t, v))
if lng_buffer:
insertions.append((len(buffered), lng_buffer))
return do_insertions(insertions,
self.root_lexer.get_tokens_unprocessed(buffered))
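# Illustrative sketch: composing two lexers in the style of the template
# lexers mentioned above. HtmlLexer and PhpLexer are real pygments lexers;
# the subclass is an example (commented out to avoid a circular import):
#
# class ExampleHtmlPhpLexer(DelegatingLexer):
#     def __init__(self, **options):
#         super(ExampleHtmlPhpLexer, self).__init__(HtmlLexer, PhpLexer,
#                                                   **options)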
# ------------------------------------------------------------------------------
# RegexLexer and ExtendedRegexLexer
#
class include(str):
"""
Indicates that a state should include rules from another state.
"""
pass
class _inherit(object):
"""
Indicates that a state should inherit from its superclass.
"""
def __repr__(self):
return 'inherit'
inherit = _inherit()
class combined(tuple):
"""
Indicates a state combined from multiple states.
"""
def __new__(cls, *args):
return tuple.__new__(cls, args)
def __init__(self, *args):
# tuple.__init__ doesn't do anything
pass
class _PseudoMatch(object):
"""
A pseudo match object constructed from a string.
"""
def __init__(self, start, text):
self._text = text
self._start = start
def start(self, arg=None):
return self._start
def end(self, arg=None):
return self._start + len(self._text)
def group(self, arg=None):
if arg:
raise IndexError('No such group')
return self._text
def groups(self):
return (self._text,)
def groupdict(self):
return {}
def bygroups(*args):
"""
Callback that yields multiple actions for each group in the match.
"""
def callback(lexer, match, ctx=None):
for i, action in enumerate(args):
if action is None:
continue
elif type(action) is _TokenType:
data = match.group(i + 1)
if data:
yield match.start(i + 1), action, data
else:
data = match.group(i + 1)
if data is not None:
if ctx:
ctx.pos = match.start(i + 1)
for item in action(lexer, _PseudoMatch(match.start(i + 1),
data), ctx):
if item:
yield item
if ctx:
ctx.pos = match.end()
return callback
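# Illustrative sketch: a rule using bygroups() emits one token per regex
# group; here an invented 'def name' rule splits into keyword, whitespace
# and function-name tokens (Keyword and Name are standard pygments tokens):
#
# (r'(def)(\s+)([a-zA-Z_]\w*)', bygroups(Keyword, Text, Name.Function)),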
class _This(object):
"""
Special singleton used for indicating the caller class.
Used by ``using``.
"""
this = _This()
def using(_other, **kwargs):
"""
Callback that processes the match with a different lexer.
The keyword arguments are forwarded to the lexer, except `state` which
is handled separately.
`state` specifies the state that the new lexer will start in, and can
be an enumerable such as ('root', 'inline', 'string') or a simple
string which is assumed to be on top of the root state.
Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
"""
gt_kwargs = {}
if 'state' in kwargs:
s = kwargs.pop('state')
if isinstance(s, (list, tuple)):
gt_kwargs['stack'] = s
else:
gt_kwargs['stack'] = ('root', s)
if _other is this:
def callback(lexer, match, ctx=None):
# if keyword arguments are given the callback
# function has to create a new lexer instance
if kwargs:
# XXX: cache that somehow
kwargs.update(lexer.options)
lx = lexer.__class__(**kwargs)
else:
lx = lexer
s = match.start()
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
yield i + s, t, v
if ctx:
ctx.pos = match.end()
else:
def callback(lexer, match, ctx=None):
# XXX: cache that somehow
kwargs.update(lexer.options)
lx = _other(**kwargs)
s = match.start()
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
yield i + s, t, v
if ctx:
ctx.pos = match.end()
return callback
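# Illustrative sketch: a rule that hands its entire match to another lexer.
# CssLexer is a real pygments lexer; the rule itself is an invented example
# (a real lexer would usually split the tags off with bygroups):
#
# (r'(?s)<style>.*?</style>', using(CssLexer)),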
class default:
"""
Indicates a state or state action (e.g. #pop) to apply.
For example default('#pop') is equivalent to ('', Token, '#pop')
Note that state tuples may be used as well.
.. versionadded:: 2.0
"""
def __init__(self, state):
self.state = state
class words(Future):
"""
Indicates a list of literal words that is transformed into an optimized
regex that matches any of the words.
.. versionadded:: 2.0
"""
def __init__(self, words, prefix='', suffix=''):
self.words = words
self.prefix = prefix
self.suffix = suffix
def get(self):
return regex_opt(self.words, prefix=self.prefix, suffix=self.suffix)
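# Illustrative sketch: words() defers building one optimized regex that
# matches any of the given literals (example word list):
_example_words = words(('if', 'elif', 'else'), prefix=r'\b', suffix=r'\b')
# _example_words.get() returns a single alternation pattern usable as the
# regex component of a rule.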
class RegexLexerMeta(LexerMeta):
"""
Metaclass for RegexLexer, creates the self._tokens attribute from
self.tokens on the first instantiation.
"""
def _process_regex(cls, regex, rflags, state):
"""Preprocess the regular expression component of a token definition."""
if isinstance(regex, Future):
regex = regex.get()
return re.compile(regex, rflags).match
def _process_token(cls, token):
"""Preprocess the token component of a token definition."""
assert type(token) is _TokenType or callable(token), \
'token type must be simple type or callable, not %r' % (token,)
return token
def _process_new_state(cls, new_state, unprocessed, processed):
"""Preprocess the state transition action of a token definition."""
if isinstance(new_state, str):
# an existing state
if new_state == '#pop':
return -1
elif new_state in unprocessed:
return (new_state,)
elif new_state == '#push':
return new_state
elif new_state[:5] == '#pop:':
return -int(new_state[5:])
else:
assert False, 'unknown new state %r' % new_state
elif isinstance(new_state, combined):
# combine a new state from existing ones
tmp_state = '_tmp_%d' % cls._tmpname
cls._tmpname += 1
itokens = []
for istate in new_state:
assert istate != new_state, 'circular state ref %r' % istate
itokens.extend(cls._process_state(unprocessed,
processed, istate))
processed[tmp_state] = itokens
return (tmp_state,)
elif isinstance(new_state, tuple):
# push more than one state
for istate in new_state:
assert (istate in unprocessed or
istate in ('#pop', '#push')), \
'unknown new state ' + istate
return new_state
else:
assert False, 'unknown new state def %r' % new_state
def _process_state(cls, unprocessed, processed, state):
"""Preprocess a single state definition."""
assert type(state) is str, "wrong state name %r" % state
assert state[0] != '#', "invalid state name %r" % state
if state in processed:
return processed[state]
tokens = processed[state] = []
rflags = cls.flags
for tdef in unprocessed[state]:
if isinstance(tdef, include):
# it's a state reference
assert tdef != state, "circular state reference %r" % state
tokens.extend(cls._process_state(unprocessed, processed,
str(tdef)))
continue
if isinstance(tdef, _inherit):
# should be processed already, but may not in the case of:
# 1. the state has no counterpart in any parent
# 2. the state includes more than one 'inherit'
continue
if isinstance(tdef, default):
new_state = cls._process_new_state(tdef.state, unprocessed, processed)
tokens.append((re.compile('').match, None, new_state))
continue
assert type(tdef) is tuple, "wrong rule def %r" % tdef
try:
rex = cls._process_regex(tdef[0], rflags, state)
except Exception as err:
raise ValueError("uncompilable regex %r in state %r of %r: %s" %
(tdef[0], state, cls, err))
token = cls._process_token(tdef[1])
if len(tdef) == 2:
new_state = None
else:
new_state = cls._process_new_state(tdef[2],
unprocessed, processed)
tokens.append((rex, token, new_state))
return tokens
def process_tokendef(cls, name, tokendefs=None):
"""Preprocess a dictionary of token definitions."""
processed = cls._all_tokens[name] = {}
tokendefs = tokendefs or cls.tokens[name]
for state in list(tokendefs):
cls._process_state(tokendefs, processed, state)
return processed
def get_tokendefs(cls):
"""
Merge tokens from superclasses in MRO order, returning a single tokendef
dictionary.
Any state that is not defined by a subclass will be inherited
automatically. States that *are* defined by subclasses will, by
default, override that state in the superclass. If a subclass wishes to
inherit definitions from a superclass, it can use the special value
"inherit", which will cause the superclass' state definition to be
included at that point in the state.
"""
tokens = {}
inheritable = {}
for c in cls.__mro__:
toks = c.__dict__.get('tokens', {})
for state, items in iteritems(toks):
curitems = tokens.get(state)
if curitems is None:
# N.b. because this is assigned by reference, sufficiently
# deep hierarchies are processed incrementally (e.g. for
# A(B), B(C), C(RegexLexer), B will be premodified so X(B)
# will not see any inherits in B).
tokens[state] = items
try:
inherit_ndx = items.index(inherit)
except ValueError:
continue
inheritable[state] = inherit_ndx
continue
inherit_ndx = inheritable.pop(state, None)
if inherit_ndx is None:
continue
# Replace the "inherit" value with the items
curitems[inherit_ndx:inherit_ndx+1] = items
try:
# N.b. this is the index in items (that is, the superclass
# copy), so offset required when storing below.
new_inh_ndx = items.index(inherit)
except ValueError:
pass
else:
inheritable[state] = inherit_ndx + new_inh_ndx
return tokens
def __call__(cls, *args, **kwds):
"""Instantiate cls after preprocessing its token definitions."""
if '_tokens' not in cls.__dict__:
cls._all_tokens = {}
cls._tmpname = 0
if hasattr(cls, 'token_variants') and cls.token_variants:
# don't process yet
pass
else:
cls._tokens = cls.process_tokendef('', cls.get_tokendefs())
return type.__call__(cls, *args, **kwds)
@add_metaclass(RegexLexerMeta)
class RegexLexer(Lexer):
"""
Base for simple stateful regular expression-based lexers.
Simplifies the lexing process so that you need only
provide a list of states and regular expressions.
"""
#: Flags for compiling the regular expressions.
#: Defaults to MULTILINE.
flags = re.MULTILINE
#: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
#:
#: The initial state is 'root'.
#: ``new_state`` can be omitted to signify no state transition.
#: If it is a string, the state is pushed on the stack and changed.
#: If it is a tuple of strings, all states are pushed on the stack and
#: the current state will be the topmost.
#: It can also be ``combined('state1', 'state2', ...)``
#: to signify a new, anonymous state combined from the rules of two
#: or more existing ones.
#: Furthermore, it can be '#pop' to signify going back one step in
#: the state stack, or '#push' to push the current state on the stack
#: again.
#:
#: The tuple can also be replaced with ``include('state')``, in which
#: case the rules from the state named by the string are included in the
#: current one.
tokens = {}
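# A minimal illustrative sketch (assumed subclass, not part of this module)
# of the tokens format documented above; ``String`` is assumed to be
# imported from the token module alongside ``Text``:
#
#     class ToyLexer(RegexLexer):
#         tokens = {
#             'root': [
#                 (r'\s+', Text),             # no state transition
#                 (r'"', String, 'string'),   # push the 'string' state
#             ],
#             'string': [
#                 (r'[^"]+', String),
#                 (r'"', String, '#pop'),     # go back one step in the stack
#             ],
#         }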
def get_tokens_unprocessed(self, text, stack=('root',)):
"""
Split ``text`` into (tokentype, text) pairs.
``stack`` is the initial stack (default: ``['root']``)
"""
pos = 0
tokendefs = self._tokens
statestack = list(stack)
statetokens = tokendefs[statestack[-1]]
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, pos)
if m:
if action is not None:
if type(action) is _TokenType:
yield pos, action, m.group()
else:
for item in action(self, m):
yield item
pos = m.end()
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
statestack.pop()
elif state == '#push':
statestack.append(statestack[-1])
else:
statestack.append(state)
elif isinstance(new_state, int):
# pop
del statestack[new_state:]
elif new_state == '#push':
statestack.append(statestack[-1])
else:
assert False, "wrong state def: %r" % new_state
statetokens = tokendefs[statestack[-1]]
break
else:
try:
if text[pos] == '\n':
# at EOL, reset state to "root"
statestack = ['root']
statetokens = tokendefs['root']
yield pos, Text, u'\n'
pos += 1
continue
yield pos, Error, text[pos]
pos += 1
except IndexError:
break
class LexerContext(object):
"""
A helper object that holds lexer position data.
"""
def __init__(self, text, pos, stack=None, end=None):
self.text = text
self.pos = pos
self.end = end or len(text) # end=0 not supported ;-)
self.stack = stack or ['root']
def __repr__(self):
return 'LexerContext(%r, %r, %r)' % (
self.text, self.pos, self.stack)
class ExtendedRegexLexer(RegexLexer):
"""
A RegexLexer that uses a context object to store its state.
"""
def get_tokens_unprocessed(self, text=None, context=None):
"""
Split ``text`` into (tokentype, text) pairs.
If ``context`` is given, use this lexer context instead.
"""
tokendefs = self._tokens
if not context:
ctx = LexerContext(text, 0)
statetokens = tokendefs['root']
else:
ctx = context
statetokens = tokendefs[ctx.stack[-1]]
text = ctx.text
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, ctx.pos, ctx.end)
if m:
if action is not None:
if type(action) is _TokenType:
yield ctx.pos, action, m.group()
ctx.pos = m.end()
else:
for item in action(self, m, ctx):
yield item
if not new_state:
# altered the state stack?
statetokens = tokendefs[ctx.stack[-1]]
# CAUTION: callback must set ctx.pos!
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
ctx.stack.pop()
elif state == '#push':
ctx.stack.append(ctx.stack[-1])
else:
ctx.stack.append(state)
elif isinstance(new_state, int):
# pop
del ctx.stack[new_state:]
elif new_state == '#push':
ctx.stack.append(ctx.stack[-1])
else:
assert False, "wrong state def: %r" % new_state
statetokens = tokendefs[ctx.stack[-1]]
break
else:
try:
if ctx.pos >= ctx.end:
break
if text[ctx.pos] == '\n':
# at EOL, reset state to "root"
ctx.stack = ['root']
statetokens = tokendefs['root']
yield ctx.pos, Text, u'\n'
ctx.pos += 1
continue
yield ctx.pos, Error, text[ctx.pos]
ctx.pos += 1
except IndexError:
break
def do_insertions(insertions, tokens):
"""
Helper for lexers which must combine the results of several
sublexers.
``insertions`` is a list of ``(index, itokens)`` pairs.
Each ``itokens`` iterable should be inserted at position
``index`` into the token stream given by the ``tokens``
argument.
The result is a combined token stream.
TODO: clean up the code here.
"""
insertions = iter(insertions)
try:
index, itokens = next(insertions)
except StopIteration:
# no insertions
for item in tokens:
yield item
return
realpos = None
insleft = True
# iterate over the token stream where we want to insert
# the tokens from the insertion list.
for i, t, v in tokens:
# first iteration: store the position of the first item
if realpos is None:
realpos = i
oldi = 0
while insleft and i + len(v) >= index:
tmpval = v[oldi:index - i]
yield realpos, t, tmpval
realpos += len(tmpval)
for it_index, it_token, it_value in itokens:
yield realpos, it_token, it_value
realpos += len(it_value)
oldi = index - i
try:
index, itokens = next(insertions)
except StopIteration:
insleft = False
break # not strictly necessary
yield realpos, t, v[oldi:]
realpos += len(v) - oldi
# leftover tokens
while insleft:
# no normal tokens, set realpos to zero
realpos = realpos or 0
for p, t, v in itokens:
yield realpos, t, v
realpos += len(v)
try:
index, itokens = next(insertions)
except StopIteration:
insleft = False
break # not strictly necessary
class ProfilingRegexLexerMeta(RegexLexerMeta):
"""Metaclass for ProfilingRegexLexer, collects regex timing info."""
def _process_regex(cls, regex, rflags, state):
if isinstance(regex, words):
rex = regex_opt(regex.words, prefix=regex.prefix,
suffix=regex.suffix)
else:
rex = regex
compiled = re.compile(rex, rflags)
def match_func(text, pos, endpos=sys.maxsize):
info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0])
t0 = time.time()
res = compiled.match(text, pos, endpos)
t1 = time.time()
info[0] += 1
info[1] += t1 - t0
return res
return match_func
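# Note: cls._prof_data[-1] thus maps (state, regex) -> [ncalls, total_seconds]
# for the current lexing run; get_tokens_unprocessed() below pops that dict
# and renders it as the sorted profiling report.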
@add_metaclass(ProfilingRegexLexerMeta)
class ProfilingRegexLexer(RegexLexer):
"""Drop-in replacement for RegexLexer that does profiling of its regexes."""
_prof_data = []
_prof_sort_index = 4 # defaults to time per call
def get_tokens_unprocessed(self, text, stack=('root',)):
# this needs to be a stack, since using(this) will produce nested calls
self.__class__._prof_data.append({})
for tok in RegexLexer.get_tokens_unprocessed(self, text, stack):
yield tok
rawdata = self.__class__._prof_data.pop()
data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65],
n, 1000 * t, 1000 * t / n)
for ((s, r), (n, t)) in rawdata.items()),
key=lambda x: x[self._prof_sort_index],
reverse=True)
sum_total = sum(x[3] for x in data)
print()
print('Profiling result for %s lexing %d chars in %.3f ms' %
(self.__class__.__name__, len(text), sum_total))
print('=' * 110)
print('%-20s %-64s ncalls tottime percall' % ('state', 'regex'))
print('-' * 110)
for d in data:
print('%-20s %-65s %5d %8.4f %8.4f' % d)
print('=' * 110)
| mit |
B3AU/waveTree | sklearn/decomposition/sparse_pca.py | 10 | 8903 | """Matrix factorization with Sparse PCA"""
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import numpy as np
from ..utils import check_random_state, array2d
from ..linear_model import ridge_regression
from ..base import BaseEstimator, TransformerMixin
from .dict_learning import dict_learning, dict_learning_online
class SparsePCA(BaseEstimator, TransformerMixin):
"""Sparse Principal Components Analysis (SparsePCA)
Finds the set of sparse components that can optimally reconstruct
the data. The amount of sparseness is controllable by the coefficient
of the L1 penalty, given by the parameter alpha.
Parameters
----------
n_components : int,
Number of sparse atoms to extract.
alpha : float,
Sparsity controlling parameter. Higher values lead to sparser
components.
ridge_alpha : float,
Amount of ridge shrinkage to apply in order to improve
conditioning when calling the transform method.
max_iter : int,
Maximum number of iterations to perform.
tol : float,
Tolerance for the stopping condition.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs : int,
Number of parallel jobs to run.
U_init : array of shape (n_samples, n_components),
Initial values for the loadings for warm restart scenarios.
V_init : array of shape (n_components, n_features),
Initial values for the components for warm restart scenarios.
verbose :
Degree of verbosity of the printed output.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
`components_` : array, [n_components, n_features]
Sparse components extracted from the data.
`error_` : array
Vector of errors at each iteration.
See also
--------
PCA
MiniBatchSparsePCA
DictionaryLearning
"""
def __init__(self, n_components=None, alpha=1, ridge_alpha=0.01,
max_iter=1000, tol=1e-8, method='lars', n_jobs=1, U_init=None,
V_init=None, verbose=False, random_state=None):
self.n_components = n_components
self.alpha = alpha
self.ridge_alpha = ridge_alpha
self.max_iter = max_iter
self.tol = tol
self.method = method
self.n_jobs = n_jobs
self.U_init = U_init
self.V_init = V_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = array2d(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
code_init = self.V_init.T if self.V_init is not None else None
dict_init = self.U_init.T if self.U_init is not None else None
Vt, _, E = dict_learning(X.T, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.method, n_jobs=self.n_jobs,
verbose=self.verbose,
random_state=random_state,
code_init=code_init,
dict_init=dict_init)
self.components_ = Vt.T
self.error_ = E
return self
def transform(self, X, ridge_alpha=None):
"""Least Squares projection of the data onto the sparse components.
To avoid instability issues in case the system is under-determined,
regularization can be applied (Ridge regression) via the
`ridge_alpha` parameter.
Note that Sparse PCA components' orthogonality is not enforced as in PCA;
hence one cannot use a simple linear projection.
Parameters
----------
X: array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
ridge_alpha: float, default: 0.01
Amount of ridge shrinkage to apply in order to improve
conditioning.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
ridge_alpha = self.ridge_alpha if ridge_alpha is None else ridge_alpha
U = ridge_regression(self.components_.T, X.T, ridge_alpha,
solver='dense_cholesky')
s = np.sqrt((U ** 2).sum(axis=0))
s[s == 0] = 1
U /= s
return U
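# A hedged usage sketch (synthetic data; only the SparsePCA class above is
# assumed):
#
#     import numpy as np
#     rng = np.random.RandomState(0)
#     X = rng.randn(50, 10)
#     spca = SparsePCA(n_components=3, alpha=1, random_state=0)
#     codes = spca.fit(X).transform(X)   # shape (50, 3)
#     # spca.components_ has shape (3, 10); larger alpha gives sparser rows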
class MiniBatchSparsePCA(SparsePCA):
"""Mini-batch Sparse Principal Components Analysis
Finds the set of sparse components that can optimally reconstruct
the data. The amount of sparseness is controllable by the coefficient
of the L1 penalty, given by the parameter alpha.
Parameters
----------
n_components : int,
number of sparse atoms to extract
alpha : int,
Sparsity controlling parameter. Higher values lead to sparser
components.
ridge_alpha : float,
Amount of ridge shrinkage to apply in order to improve
conditioning when calling the transform method.
n_iter : int,
number of iterations to perform for each mini batch
callback : callable,
callable that gets invoked every five iterations
batch_size : int,
the number of features to take in each mini batch
verbose :
degree of output the procedure will print
shuffle : boolean,
whether to shuffle the data before splitting it in batches
n_jobs : int,
number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
`components_` : array, [n_components, n_features]
Sparse components extracted from the data.
`error_` : array
Vector of errors at each iteration.
See also
--------
PCA
SparsePCA
DictionaryLearning
"""
def __init__(self, n_components=None, alpha=1, ridge_alpha=0.01,
n_iter=100, callback=None, batch_size=3, verbose=False,
shuffle=True, n_jobs=1, method='lars', random_state=None):
self.n_components = n_components
self.alpha = alpha
self.ridge_alpha = ridge_alpha
self.n_iter = n_iter
self.callback = callback
self.batch_size = batch_size
self.verbose = verbose
self.shuffle = shuffle
self.n_jobs = n_jobs
self.method = method
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = array2d(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
Vt, _ = dict_learning_online(X.T, n_components, alpha=self.alpha,
n_iter=self.n_iter, return_code=True,
dict_init=None, verbose=self.verbose,
callback=self.callback,
batch_size=self.batch_size,
shuffle=self.shuffle,
n_jobs=self.n_jobs, method=self.method,
random_state=random_state)
self.components_ = Vt.T
return self
| bsd-3-clause |
goddino/libjingle | trunk/build/android/pylib/base/base_test_runner.py | 2 | 7932 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Base class for running tests on a single device."""
import contextlib
import httplib
import logging
import os
import tempfile
import time
from pylib import android_commands
from pylib import constants
from pylib import ports
from pylib.chrome_test_server_spawner import SpawningServer
from pylib.flag_changer import FlagChanger
from pylib.forwarder import Forwarder
from pylib.valgrind_tools import CreateTool
# TODO(frankf): Move this to pylib/utils
import lighttpd_server
# A file on device to store ports of net test server. The format of the file is
# test-spawner-server-port:test-server-port
NET_TEST_SERVER_PORT_INFO_FILE = 'net-test-server-ports'
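# For illustration only (hypothetical consumer, not part of this module): a
# device-side reader of that file would recover both ports with e.g.
#   spawner_port, server_port = map(int, contents.split(':'))
# given contents such as '8001:8002'.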
class BaseTestRunner(object):
"""Base class for running tests on a single device.
A subclass should implement RunTest() to run a single test; SetUp() and
TearDown() run once before and after all tests, respectively.
"""
def __init__(self, device, tool, build_type):
"""
Args:
device: Tests will run on the device of this ID.
tool: Name of the tool to run the tests under (passed to CreateTool).
build_type: 'Release' or 'Debug'.
"""
self.device = device
self.adb = android_commands.AndroidCommands(device=device)
self.tool = CreateTool(tool, self.adb)
self._http_server = None
self._forwarder = None
self._forwarder_device_port = 8000
self.forwarder_base_url = ('http://localhost:%d' %
self._forwarder_device_port)
self.flags = FlagChanger(self.adb)
self.flags.AddFlags(['--disable-fre'])
self._spawning_server = None
self._spawner_forwarder = None
# We will allocate port for test server spawner when calling method
# LaunchChromeTestServerSpawner and allocate port for test server when
# starting it in TestServerThread.
self.test_server_spawner_port = 0
self.test_server_port = 0
self.build_type = build_type
def _PushTestServerPortInfoToDevice(self):
"""Pushes the latest port information to device."""
self.adb.SetFileContents(self.adb.GetExternalStorage() + '/' +
NET_TEST_SERVER_PORT_INFO_FILE,
'%d:%d' % (self.test_server_spawner_port,
self.test_server_port))
def RunTest(self, test):
"""Runs a test. Needs to be overridden.
Args:
test: A test to run.
Returns:
Tuple containing:
(base_test_result.TestRunResults, tests to rerun or None)
"""
raise NotImplementedError
def PushDependencies(self):
"""Push all dependencies to device once before all tests are run."""
pass
def SetUp(self):
"""Run once before all tests are run."""
Forwarder.KillDevice(self.adb, self.tool)
self.PushDependencies()
def TearDown(self):
"""Run once after all tests are run."""
self.ShutdownHelperToolsForTestSuite()
def CopyTestData(self, test_data_paths, dest_dir):
"""Copies |test_data_paths| list of files/directories to |dest_dir|.
Args:
test_data_paths: A list of files or directories relative to |dest_dir|
which should be copied to the device. The paths must exist in
|DIR_SOURCE_ROOT|.
dest_dir: Absolute path to copy to on the device.
"""
for p in test_data_paths:
self.adb.PushIfNeeded(
os.path.join(constants.DIR_SOURCE_ROOT, p),
os.path.join(dest_dir, p))
def LaunchTestHttpServer(self, document_root, port=None,
extra_config_contents=None):
"""Launches an HTTP server to serve HTTP tests.
Args:
document_root: Document root of the HTTP server.
port: port on which we want to the http server bind.
extra_config_contents: Extra config contents for the HTTP server.
"""
self._http_server = lighttpd_server.LighttpdServer(
document_root, port=port, extra_config_contents=extra_config_contents)
if self._http_server.StartupHttpServer():
logging.info('http server started: http://localhost:%s',
self._http_server.port)
else:
logging.critical('Failed to start http server')
self.StartForwarderForHttpServer()
return (self._forwarder_device_port, self._http_server.port)
def _ForwardPort(self, port_pairs):
"""Creates a forwarder instance if needed and forward a port."""
if not self._forwarder:
self._forwarder = Forwarder(self.adb, self.build_type)
self._forwarder.Run(port_pairs, self.tool, '127.0.0.1')
def StartForwarder(self, port_pairs):
"""Starts TCP traffic forwarding for the given |port_pairs|.
Args:
port_pairs: A list of (device_port, local_port) tuples to forward.
"""
self._ForwardPort(port_pairs)
def StartForwarderForHttpServer(self):
"""Starts a forwarder for the HTTP server.
The forwarder forwards HTTP requests and responses between host and device.
"""
self._ForwardPort([(self._forwarder_device_port, self._http_server.port)])
def RestartHttpServerForwarderIfNecessary(self):
"""Restarts the forwarder if it's not open."""
# Checks to see if the http server port is being used. If not forwards the
# request.
# TODO(dtrainor): This is not always reliable because sometimes the port
# will be left open even after the forwarder has been killed.
if not ports.IsDevicePortUsed(self.adb,
self._forwarder_device_port):
self.StartForwarderForHttpServer()
def ShutdownHelperToolsForTestSuite(self):
"""Shuts down the server and the forwarder."""
# Forwarders should be killed before the actual servers they're forwarding
# to as they are clients potentially with open connections and to allow for
# proper hand-shake/shutdown.
Forwarder.KillDevice(self.adb, self.tool)
if self._forwarder:
self._forwarder.Close()
if self._http_server:
self._http_server.ShutdownHttpServer()
if self._spawning_server:
self._spawning_server.Stop()
self.flags.Restore()
def CleanupSpawningServerState(self):
"""Tells the spawning server to clean up any state.
If the spawning server is reused for multiple tests, this should be called
after each test to prevent tests affecting each other.
"""
if self._spawning_server:
self._spawning_server.CleanupState()
def LaunchChromeTestServerSpawner(self):
"""Launches test server spawner."""
server_ready = False
error_msgs = []
# TODO(pliard): deflake this function. The for loop should be removed as
# well as IsHttpServerConnectable(). spawning_server.Start() should also
# block until the server is ready.
# Try 3 times to launch test spawner server.
for i in xrange(0, 3):
self.test_server_spawner_port = ports.AllocateTestServerPort()
self._ForwardPort(
[(self.test_server_spawner_port, self.test_server_spawner_port)])
self._spawning_server = SpawningServer(self.test_server_spawner_port,
self.adb,
self.tool,
self._forwarder,
self.build_type)
self._spawning_server.Start()
server_ready, error_msg = ports.IsHttpServerConnectable(
'127.0.0.1', self.test_server_spawner_port, path='/ping',
expected_read='ready')
if server_ready:
break
else:
error_msgs.append(error_msg)
self._spawning_server.Stop()
# Wait for 2 seconds then restart.
time.sleep(2)
if not server_ready:
logging.error(';'.join(error_msgs))
raise Exception('Can not start the test spawner server.')
self._PushTestServerPortInfoToDevice()
| bsd-3-clause |
phoenix741/spksrc | spk/sabnzbd/src/CharTranslator.py | 61 | 15564 | #!/usr/local/sabnzbd/env/bin/python -OO
#-*- coding: iso-8859-15 -*-
#
# If a file has been archived under an ISO-8859 environment and unarchived
# under an UTF8 environment, then you will get an encoding format problem.
# The file will not be readable through SAMBA.
#
# Renaming script for SABnzbd running under Synology NAS.
# By default the NZB software is running under UTF-8 encoding format
# in order to correctly handle the French accents (éèàç...), a SABnzbd
# post-processing script must be run to convert the file encoding.
#
# To fix this problem, you must convert the encoding format
# to the UTF8 (default Synology encoding)
# The script tries to detect whether the original files/directories come
# from a RAR archive. In this case the unrar command on your Syno system will
# unpack in CP850 (DOS) format.
# NB: in all cases, files will be readable through Samba, even if the detection
# fails. But the converted characters will not be right, e.g. Î? instead of é
#
# Remark: I guess it should work for any other encoding style. Just replace
# ISO-8859-15 (Western Europe) with the one corresponding to your country:
# http://en.wikipedia.org/wiki/Character_encoding#Common_character_encodings
#
# Done by LapinFou
# date | version | comment
#--------------------------------------
# 12-04-22 | 1.0 | Initial version
# 12-04-22 | 1.1 | Change encoding to ISO-8859-15
# | Added CP437 special characters (0x80-0x9A)
# 12-04-24 | 1.2 | Mixed encoding is now supported
# | UTF-8 encoding format detected
# 12-04-24 | 1.3 | Fixed typo line 57 (test must be 0xA0, not 0xA1)
# 12-05-24 | 1.4 | Added an exception for "Â" character
# | Added 7z unpack
# | Added move option
# | Added Syno index option
# 13-02-15 | 1.5 | Added an option to activate Sickbeard post-processing
# | More evoluate move option (merge is now managed)
# | Argv1 folder is not renamed anymore (already managed by SABnzbd)
# 13-02-18 | 1.6 | Argv1 folder is now renamed (not managed by SABnzbd)
# 13-02-19 | 1.7 | Changed CP437 detection range
# 13-02-19 | 1.8 | Changed CP437 DOS encoding style with CP850
# 13-10-02 | 1.9 | Fixed an issue with some NZB and Sickbeard option
# | In order to simplify the support, the script version is now displayed
#
# get library modules
import sys
import os
import subprocess
import shutil
scriptVersionIs = 1.9
# If empty, then no move
# Format must be synology full path (case sensitive). For ex: /volume1/video/News
MoveToThisFolder = ''
# If MoveMergeSubFolder = False, then equivalent to unix command:
# mv -rf srcFolder destFolder
# In case of conflict between an already existing sub-folder in the destination folder:
# the destination sub-folder will be replaced with source sub-folder
#
# If MoveMergeSubFolder = True, then equivalent to unix command:
# cp -rf srcFolder/* destFolder/
# rm -rf srcFolder
# In case of conflict between an already existing sub-folder in the destination folder:
# the destination sub-folder will be merged with source sub-folder (kind of incremental)
MoveMergeSubFolder = True
# /!\ IndexInSynoDLNA and SickBeardPostProcessing are exclusive
# =============================================================
# If "True", then the folder will be indexed into Synology DLNA
# By default it is "False"
IndexInSynoDLNA = False
# If "True", the folder will be send to SickBeard for Post-Processing
# By default it is "False"
SickBeardPostProcessing = False
# If "True", all .7z files will be unpacked then source .7z file will be deleted
# By default it is "False"
Unpack7z = True
########################
# ----- Functions ---- #
########################
# Special character hex range:
# CP850: 0x80-0xA5 (fortunately not used in ISO-8859-15)
# UTF-8: 1st hex code 0xC2-0xC3 followed by a 2nd hex code 0xA0-0xFF
# ISO-8859-15: 0xA6-0xFF
# The function will detect if fileDirName contains a special character
# If there is a special character, it detects whether the encoding is UTF-8, CP850 or ISO-8859-15
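# Minimal sketch of the conversion performed below (the example byte is an
# assumption): the CP850 byte '\x82' is 'é', so
#   '\x82'.decode('cp850').encode('utf-8') == '\xc3\xa9'
# which is the UTF-8 form that Synology expects.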
def renameFunc(fullPath, fileDirName):
encodingDetected = False
# parsing all files/directories in order to detect if CP850 is used
for Idx in range(len(fileDirName)):
# /!\ detection is done 2char by 2char for UTF-8 special character
if (len(fileDirName) != 1) & (Idx < (len(fileDirName) - 1)):
# Detect UTF-8
if ((fileDirName[Idx] == '\xC2') | (fileDirName[Idx] == '\xC3')) & ((fileDirName[Idx+1] >= '\xA0') & (fileDirName[Idx+1] <= '\xFF')):
print os.path.join(fullPath, fileDirName) + " -> UTF-8 detected: Nothing to be done"
encodingDetected = True
break;
# Detect CP850
elif ((fileDirName[Idx] >= '\x80') & (fileDirName[Idx] <= '\xA5')):
utf8Name = fileDirName.decode('cp850')
utf8Name = utf8Name.encode('utf-8')
os.rename(os.path.join(fullPath, fileDirName), os.path.join(fullPath, utf8Name))
print os.path.join(fullPath, utf8Name) + " -> CP850 detected: Renamed"
encodingDetected = True
break;
# Detect ISO-8859-15
elif (fileDirName[Idx] >= '\xA6') & (fileDirName[Idx] <= '\xFF'):
utf8Name = fileDirName.decode('iso-8859-15')
utf8Name = utf8Name.encode('utf-8')
os.rename(os.path.join(fullPath, fileDirName), os.path.join(fullPath, utf8Name))
print os.path.join(fullPath, utf8Name) + " -> ISO-8859-15 detected: Renamed"
encodingDetected = True
break;
else:
# Detect CP850
if ((fileDirName[Idx] >= '\x80') & (fileDirName[Idx] <= '\xA5')):
utf8Name = fileDirName.decode('cp850')
utf8Name = utf8Name.encode('utf-8')
os.rename(os.path.join(fullPath, fileDirName), os.path.join(fullPath, utf8Name))
print os.path.join(fullPath, utf8Name) + " -> CP850 detected: Renamed"
encodingDetected = True
break;
# Detect ISO-8859-15
elif (fileDirName[Idx] >= '\xA6') & (fileDirName[Idx] <= '\xFF'):
utf8Name = fileDirName.decode('iso-8859-15')
utf8Name = utf8Name.encode('utf-8')
os.rename(os.path.join(fullPath, fileDirName), os.path.join(fullPath, utf8Name))
print os.path.join(fullPath, utf8Name) + " -> ISO-8859-15 detected: Renamed"
encodingDetected = True
break;
if (encodingDetected == False):
print os.path.join(fullPath, fileDirName) + " -> No special characters detected: Nothing to be done"
return
# scan .7z files and unpack them
def unpack7zFunc(DirName):
print "Scanning for .7z file(s), then unpack them"
print "Scanning files..."
DetectedFiles = False
for dirname, dirnames, filenames in os.walk(DirName):
for filename in filenames:
if (filename[-3:] == ".7z"):
print "Unpack %s..." %(filename)
DetectedFiles = True
try:
filepath = os.path.join(dirname, filename)
syno7z_cmd = ['/usr/syno/bin/7z', 'x', '-y', filepath]
p = subprocess.Popen(syno7z_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if (str(out) != ''):
print("7z result: " + filepath + " successfully unpacked")
os.remove(filepath)
print(filepath + " has been deleted")
if (str(err) != ''):
print("7z failed: " + str(err))
except OSError, e:
print("Unable to run 7z: "+str(e))
if DetectedFiles:
print "Scanning for .7z files Done !"
else:
print "No .7z file Detected !"
return
# add folder in the Syno index database (DLNA server)
def addToSynoIndex(DirName):
print "Adding folder in the DLNA server"
synoindex_cmd = ['/usr/syno/bin/synoindex', '-A', DirName]
try:
p = subprocess.Popen(synoindex_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if (str(out) == ''):
print("synoindex result: " + DirName + " successfully added to Synology database")
else:
print("synoindex result: " + str(out))
if (str(err) != ''):
print("synoindex failed: " + str(err))
except OSError, e:
print("Unable to run synoindex: "+str(e))
return
########################
# --- Main Program --- #
########################
print "Launching CharTranslator Python script (v%s) ..." %scriptVersionIs
print ""
# Get scripts directory of the SABnzbd from its config.ini file
if (SickBeardPostProcessing == True):
print 100*'-'
print "SickBeardPostProcessing option is ON"
print "Locating SABnzbd config.ini file..."
# Get SABnzbd rundir folder
currentFolder = os.getcwd()
# SABnzbd config.ini location
SabScriptsFolder = ''
confFile = '../../var/config.ini'
# Check that file does exit
if os.path.isfile(confFile):
SabConfigFile = open('../../var/config.ini', 'r')
# Parse each lines in order to get scripts folder path
for line in SabConfigFile.readlines():
if line[:len('script_dir')] == 'script_dir':
# Get script_dir result
SabScriptsFolder = line.split('=')[1]
# Remove 1st space + \n
if (SabScriptsFolder[0] == ' '):
SabScriptsFolder = SabScriptsFolder[1:]
SabScriptsFolder = SabScriptsFolder.replace('\n', '')
break
SabConfigFile.close
# Check that SABnzbd script folder has been found
if (SabScriptsFolder == ''):
print 100*'#'
print "SABnzbd script_dir parameter not found!"
print 100*'#'
sys.exit(1)
else:
print "SABnzbd script_dir parameter is: '%s'" %SabScriptsFolder
# Load SickBeard module
SickBeardScript = os.path.join(SabScriptsFolder, 'autoProcessTV.py')
# Check that SickBeard post-processing is present into SABnzbd scripts folder
if os.path.isfile(SickBeardScript):
sys.path.append(SabScriptsFolder)
print "Loading SickBeard 'autoProcessTV' module"
import autoProcessTV
print 100*'-'
print ""
else:
print 100*'#'
print "Unable to find SickBeard autoProcessTV.py script in folder:"
print SickBeardScript
print 100*'#'
sys.exit(1)
# Exit if the file doesn't exist
else:
print 100*'#'
print "Unable to find SABnzbd config.ini file in this folder:"
print os.path.join(currentFolder, confFile)
print 100*'#'
sys.exit(1)
# Change current directory to SABnzbd argument 1
os.chdir(sys.argv[1])
# display directory of the SABnzbd job
currentFolder = os.getcwd()
print "Current folder is " + currentFolder
# rename SABnzbd job directory (coming from SABnzbd: never in CP850 format)
print "Renaming destination folder to UTF-8 format..."
renameFunc('', currentFolder)
currentFolder = os.getcwd()
print "Destination folder renamed !"
print ""
# Unpack 7z file(s)
if (Unpack7z == True):
print 100*'-'
unpack7zFunc(currentFolder)
print 100*'-'
print ""
# process each sub-folder starting from the deepest level
print 100*'-'
print "Renaming folders to UTF-8 format..."
for dirname, dirnames, filenames in os.walk('.', topdown=False):
for subdirname in dirnames:
renameFunc(dirname, subdirname)
print "Folder renaming Done !"
print 100*'-'
print ""
# process each file recursively
print 100*'-'
print "Renaming files to UTF-8 format..."
for dirname, dirnames, filenames in os.walk('.'):
for filename in filenames:
renameFunc(dirname, filename)
print "Files renaming Done !"
print 100*'-'
print ""
# Move current folder to an another destination if the option has been configured
if (MoveToThisFolder != ''):
print 100*'-'
print "Moving folder:"
print os.getcwd()
print "to:"
print MoveToThisFolder
# Check if destination folder does exist and can be written
# If destination doesn't exist, it will be created
if (os.access(MoveToThisFolder, os.F_OK) == False):
os.makedirs(MoveToThisFolder)
os.chmod(MoveToThisFolder, 0777)
# Check write access
if (os.access(MoveToThisFolder, os.W_OK) == False):
print 100*'#'
print "File(s)/Folder(s) can not be move in %s" %(MoveToThisFolder)
print "Please, check Unix permissions"
print 100*'#'
sys.exit(1)
# If MoveMergeSubFolder is True, then move all file(s)/folder(s) to destination
# then remove source folder
destFolder = os.path.join(MoveToThisFolder, os.path.split(os.getcwd())[-1])
if (MoveMergeSubFolder):
print " Info: Merge option is ON (incremental copy)"
try:
synoCopy_cmd = ['/bin/cp', '-Rpf', os.path.abspath(currentFolder), MoveToThisFolder]
p = subprocess.Popen(synoCopy_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if (str(err) != ''):
print("Copy failed: " + str(err))
sys.exit(1)
except OSError, e:
print("Unable to run cp: " + str(e))
sys.exit(1)
os.chdir(MoveToThisFolder)
shutil.rmtree(currentFolder)
# If MoveMergeSubFolder is False, remove folder with same (if exists)
# then move all file(s)/folder(s) to destination and remove source folder
else:
print " Info: Merge option is OFF (existing data will be deleted and replaced)"
# Remove if destination already exist
if os.path.exists(destFolder):
shutil.rmtree(destFolder)
shutil.move(currentFolder, MoveToThisFolder)
os.chdir(MoveToThisFolder)
# Update currentFolder variable
os.chdir(destFolder)
currentFolder = os.getcwd()
print 100*'-'
print ""
# Add multimedia files in the Syno DLNA if the option has been enabled
if (IndexInSynoDLNA == True) & (SickBeardPostProcessing == False):
print 100*'-'
addToSynoIndex(currentFolder)
print ""
print 100*'-'
# Send to SickBeard for post-processing
elif (IndexInSynoDLNA == False) & (SickBeardPostProcessing == True):
print 100*'-'
print "Launching SickBeard post-processing..."
autoProcessTV.processEpisode(currentFolder)
print "SickBeard post-processing done!"
print ""
print 100*'-'
# Display error message + advise if both options are enabled
elif (IndexInSynoDLNA == True) & (SickBeardPostProcessing == True):
print 100*'#'
print "IndexInSynoDLNA and SickBeardPostProcessing options are exclusive"
print "Please check your configuration"
print ""
print "If you want to have both options enables at the same time, please processed as follow:"
print " 1- Enable only SickBeardPostProcessing option"
print " 2- In SickBeard GUI -> Config -> Notifications -> Enable 'Synology Indexer'"
print 100*'#'
sys.exit(1)
print ""
print "Character encoding translation done!"
| bsd-3-clause |
ddico/odoo | addons/account/tests/test_account_analytic.py | 5 | 2464 | # -*- coding: utf-8 -*-
from odoo.addons.account.tests.common import AccountTestInvoicingCommon
from odoo.tests import tagged
from odoo.exceptions import UserError
@tagged('post_install', '-at_install')
class TestAccountAnalyticAccount(AccountTestInvoicingCommon):
@classmethod
def setUpClass(cls, chart_template_ref=None):
super().setUpClass(chart_template_ref=chart_template_ref)
cls.env.user.write({
'groups_id': [
(4, cls.env.ref('analytic.group_analytic_accounting').id),
(4, cls.env.ref('analytic.group_analytic_tags').id),
],
})
# By default, tests are run with the current user set on the first company.
cls.env.user.company_id = cls.company_data['company']
cls.test_analytic_account = cls.env['account.analytic.account'].create({'name': 'test_analytic_account'})
cls.test_analytic_tag = cls.env['account.analytic.tag'].create({'name': 'test_analytic_tag'})
def test_changing_analytic_company(self):
''' Ensure you can't change the company of an account.analytic.account if there are some journal entries '''
self.env['account.move'].create({
'move_type': 'entry',
'date': '2019-01-01',
'line_ids': [
(0, 0, {
'name': 'line_debit',
'account_id': self.company_data['default_account_revenue'].id,
'analytic_account_id': self.test_analytic_account.id,
'analytic_tag_ids': [(6, 0, self.test_analytic_tag.ids)],
}),
(0, 0, {
'name': 'line_credit',
'account_id': self.company_data['default_account_revenue'].id,
}),
],
})
# Set a different company on the analytic account.
with self.assertRaises(UserError), self.cr.savepoint():
self.test_analytic_account.company_id = self.company_data_2['company']
# Making the analytic account not company dependent is allowed.
self.test_analytic_account.company_id = False
# Set a different company on the analytic tag.
with self.assertRaises(UserError), self.cr.savepoint():
self.test_analytic_tag.company_id = self.company_data_2['company']
# Making the analytic tag not company dependent is allowed.
self.test_analytic_tag.company_id = False
| agpl-3.0 |
wizyoung/workflows.kyoyue | chardet/euctwprober.py | 289 | 1747 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCTWDistributionAnalysis
from .mbcssm import EUCTW_SM_MODEL
class EUCTWProber(MultiByteCharSetProber):
def __init__(self):
super(EUCTWProber, self).__init__()
self.coding_sm = CodingStateMachine(EUCTW_SM_MODEL)
self.distribution_analyzer = EUCTWDistributionAnalysis()
self.reset()
@property
def charset_name(self):
return "EUC-TW"
@property
def language(self):
return "Taiwan"
| mit |
JioCloud/neutron | neutron/agent/ovsdb/native/commands.py | 13 | 13697 | # Copyright (c) 2015 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from oslo_log import log as logging
from oslo_utils import excutils
from neutron.agent.ovsdb import api
from neutron.agent.ovsdb.native import idlutils
from neutron.i18n import _LE
LOG = logging.getLogger(__name__)
class BaseCommand(api.Command):
def __init__(self, api):
self.api = api
self.result = None
def execute(self, check_error=False, log_errors=True):
try:
with self.api.transaction(check_error, log_errors) as txn:
txn.add(self)
return self.result
except Exception:
with excutils.save_and_reraise_exception() as ctx:
if log_errors:
LOG.exception(_LE("Error executing command"))
if not check_error:
ctx.reraise = False
def __str__(self):
command_info = self.__dict__
return "%s(%s)" % (
self.__class__.__name__,
", ".join("%s=%s" % (k, v) for k, v in command_info.items()
if k not in ['api', 'result']))
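# Illustrative usage sketch (assumes a constructed `api` object providing
# transaction(), as the commands below expect):
#   bridges = ListBridgesCommand(api).execute(check_error=True)
# execute() wraps run_idl() in a single OVSDB transaction via api.transaction().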
class AddBridgeCommand(BaseCommand):
def __init__(self, api, name, may_exist):
super(AddBridgeCommand, self).__init__(api)
self.name = name
self.may_exist = may_exist
def run_idl(self, txn):
if self.may_exist:
br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name',
self.name, None)
if br:
return
row = txn.insert(self.api._tables['Bridge'])
row.name = self.name
self.api._ovs.verify('bridges')
self.api._ovs.bridges = self.api._ovs.bridges + [row]
# Add the internal bridge port
cmd = AddPortCommand(self.api, self.name, self.name, self.may_exist)
cmd.run_idl(txn)
cmd = DbSetCommand(self.api, 'Interface', self.name,
('type', 'internal'))
cmd.run_idl(txn)
class DelBridgeCommand(BaseCommand):
def __init__(self, api, name, if_exists):
super(DelBridgeCommand, self).__init__(api)
self.name = name
self.if_exists = if_exists
def run_idl(self, txn):
try:
br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name',
self.name)
except idlutils.RowNotFound:
if self.if_exists:
return
else:
msg = _LE("Bridge %s does not exist") % self.name
LOG.error(msg)
raise RuntimeError(msg)
self.api._ovs.verify('bridges')
for port in br.ports:
cmd = DelPortCommand(self.api, port.name, self.name,
if_exists=True)
cmd.run_idl(txn)
bridges = self.api._ovs.bridges
bridges.remove(br)
self.api._ovs.bridges = bridges
self.api._tables['Bridge'].rows[br.uuid].delete()
class BridgeExistsCommand(BaseCommand):
def __init__(self, api, name):
super(BridgeExistsCommand, self).__init__(api)
self.name = name
def run_idl(self, txn):
self.result = bool(idlutils.row_by_value(self.api.idl, 'Bridge',
'name', self.name, None))
class ListBridgesCommand(BaseCommand):
def __init__(self, api):
super(ListBridgesCommand, self).__init__(api)
def run_idl(self, txn):
# NOTE (twilson) [x.name for x in rows.values()] if no index
self.result = [x.name for x in
self.api._tables['Bridge'].rows.values()]
class BrGetExternalIdCommand(BaseCommand):
def __init__(self, api, name, field):
super(BrGetExternalIdCommand, self).__init__(api)
self.name = name
self.field = field
def run_idl(self, txn):
br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.name)
self.result = br.external_ids[self.field]
class BrSetExternalIdCommand(BaseCommand):
def __init__(self, api, name, field, value):
super(BrSetExternalIdCommand, self).__init__(api)
self.name = name
self.field = field
self.value = value
def run_idl(self, txn):
br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.name)
external_ids = getattr(br, 'external_ids', {})
external_ids[self.field] = self.value
br.external_ids = external_ids
class DbSetCommand(BaseCommand):
def __init__(self, api, table, record, *col_values):
super(DbSetCommand, self).__init__(api)
self.table = table
self.record = record
self.col_values = col_values
def run_idl(self, txn):
record = idlutils.row_by_record(self.api.idl, self.table, self.record)
for col, val in self.col_values:
# TODO(twilson) Ugh, the OVS library doesn't like OrderedDict
# We're only using it to make a unit test work, so we should fix
# this soon.
if isinstance(val, collections.OrderedDict):
val = dict(val)
setattr(record, col, val)
class DbClearCommand(BaseCommand):
def __init__(self, api, table, record, column):
super(DbClearCommand, self).__init__(api)
self.table = table
self.record = record
self.column = column
def run_idl(self, txn):
record = idlutils.row_by_record(self.api.idl, self.table, self.record)
# Create an empty value of the column type
value = type(getattr(record, self.column))()
setattr(record, self.column, value)
class DbGetCommand(BaseCommand):
def __init__(self, api, table, record, column):
super(DbGetCommand, self).__init__(api)
self.table = table
self.record = record
self.column = column
def run_idl(self, txn):
record = idlutils.row_by_record(self.api.idl, self.table, self.record)
# TODO(twilson) This feels wrong, but ovs-vsctl returns single results
# on set types without the list. The IDL is returning them as lists,
# even if the set has the maximum number of items set to 1. Might be
# able to inspect the Schema and just do this conversion for that case.
result = idlutils.get_column_value(record, self.column)
if isinstance(result, list) and len(result) == 1:
self.result = result[0]
else:
self.result = result
class SetControllerCommand(BaseCommand):
def __init__(self, api, bridge, targets):
super(SetControllerCommand, self).__init__(api)
self.bridge = bridge
self.targets = targets
def run_idl(self, txn):
br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.bridge)
controllers = []
for target in self.targets:
controller = txn.insert(self.api._tables['Controller'])
controller.target = target
controllers.append(controller)
br.verify('controller')
br.controller = controllers
class DelControllerCommand(BaseCommand):
def __init__(self, api, bridge):
super(DelControllerCommand, self).__init__(api)
self.bridge = bridge
def run_idl(self, txn):
br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.bridge)
br.controller = []
class GetControllerCommand(BaseCommand):
def __init__(self, api, bridge):
super(GetControllerCommand, self).__init__(api)
self.bridge = bridge
def run_idl(self, txn):
br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.bridge)
br.verify('controller')
self.result = [c.target for c in br.controller]
class SetFailModeCommand(BaseCommand):
def __init__(self, api, bridge, mode):
super(SetFailModeCommand, self).__init__(api)
self.bridge = bridge
self.mode = mode
def run_idl(self, txn):
br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.bridge)
br.verify('fail_mode')
br.fail_mode = self.mode
class AddPortCommand(BaseCommand):
def __init__(self, api, bridge, port, may_exist):
super(AddPortCommand, self).__init__(api)
self.bridge = bridge
self.port = port
self.may_exist = may_exist
def run_idl(self, txn):
br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.bridge)
if self.may_exist:
port = idlutils.row_by_value(self.api.idl, 'Port', 'name',
self.port, None)
if port:
return
port = txn.insert(self.api._tables['Port'])
port.name = self.port
br.verify('ports')
ports = getattr(br, 'ports', [])
ports.append(port)
br.ports = ports
iface = txn.insert(self.api._tables['Interface'])
iface.name = self.port
port.verify('interfaces')
ifaces = getattr(port, 'interfaces', [])
ifaces.append(iface)
port.interfaces = ifaces
class DelPortCommand(BaseCommand):
def __init__(self, api, port, bridge, if_exists):
super(DelPortCommand, self).__init__(api)
self.port = port
self.bridge = bridge
self.if_exists = if_exists
def run_idl(self, txn):
try:
port = idlutils.row_by_value(self.api.idl, 'Port', 'name',
self.port)
except idlutils.RowNotFound:
if self.if_exists:
return
msg = _LE("Port %s does not exist") % self.port
raise RuntimeError(msg)
if self.bridge:
br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name',
self.bridge)
else:
br = next(b for b in self.api._tables['Bridge'].rows.values()
if port in b.ports)
if port.uuid not in br.ports and not self.if_exists:
# TODO(twilson) Make real errors across both implementations
msg = _LE("Port %(port)s does not exist on %(bridge)s!") % {
'port': self.port, 'bridge': self.bridge
}
LOG.error(msg)
raise RuntimeError(msg)
br.verify('ports')
ports = br.ports
ports.remove(port)
br.ports = ports
# Also remove port/interface directly for indexing?
port.verify('interfaces')
for iface in port.interfaces:
self.api._tables['Interface'].rows[iface.uuid].delete()
self.api._tables['Port'].rows[port.uuid].delete()
class ListPortsCommand(BaseCommand):
def __init__(self, api, bridge):
super(ListPortsCommand, self).__init__(api)
self.bridge = bridge
def run_idl(self, txn):
br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.bridge)
self.result = [p.name for p in br.ports if p.name != self.bridge]
class PortToBridgeCommand(BaseCommand):
def __init__(self, api, name):
super(PortToBridgeCommand, self).__init__(api)
self.name = name
def run_idl(self, txn):
# TODO(twilson) This is expensive!
# This traversal of all ports could be eliminated by caching the bridge
# name on the Port's (or Interface's for iface_to_br) external_id field
# In fact, if we did that, the only place that uses to_br functions
# could just add the external_id field to the conditions passed to find
port = idlutils.row_by_value(self.api.idl, 'Port', 'name', self.name)
bridges = self.api._tables['Bridge'].rows.values()
self.result = next(br.name for br in bridges if port in br.ports)
class DbListCommand(BaseCommand):
def __init__(self, api, table, records, columns, if_exists):
super(DbListCommand, self).__init__(api)
self.table = self.api._tables[table]
self.columns = columns or self.table.columns.keys() + ['_uuid']
self.if_exists = if_exists
if records:
self.records = [
idlutils.row_by_record(self.api.idl, table, record).uuid
for record in records]
else:
self.records = self.table.rows.keys()
def run_idl(self, txn):
self.result = [
{
c: idlutils.get_column_value(self.table.rows[uuid], c)
for c in self.columns
}
for uuid in self.records
]
class DbFindCommand(BaseCommand):
def __init__(self, api, table, *conditions, **kwargs):
super(DbFindCommand, self).__init__(api)
self.table = self.api._tables[table]
self.conditions = conditions
self.columns = (kwargs.get('columns') or
self.table.columns.keys() + ['_uuid'])
def run_idl(self, txn):
self.result = [
{
c: idlutils.get_column_value(r, c)
for c in self.columns
}
for r in self.table.rows.values()
if idlutils.row_match(r, self.conditions)
]
| apache-2.0 |
sahiljain/catapult | third_party/google-endpoints/libpasteurize/fixes/fix_metaclass.py | 61 | 3268 | u"""
Fixer for (metaclass=X) -> __metaclass__ = X
Some semantics (see PEP 3115) may be altered in the translation."""
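# For illustration (example class, not from this module), the fixer rewrites
#     class X(Base, metaclass=Meta):
#         pass
# into
#     class X(Base):
#         __metaclass__ = Meta
#         pass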
from lib2to3 import fixer_base
from lib2to3.fixer_util import Name, syms, Node, Leaf, Newline, find_root
from lib2to3.pygram import token
from libfuturize.fixer_util import indentation, suitify
# from ..fixer_util import Name, syms, Node, Leaf, Newline, find_root, indentation, suitify
def has_metaclass(parent):
results = None
for node in parent.children:
kids = node.children
if node.type == syms.argument:
if kids[0] == Leaf(token.NAME, u"metaclass") and \
kids[1] == Leaf(token.EQUAL, u"=") and \
kids[2]:
#Hack to avoid "class X(=):" with this case.
results = [node] + kids
break
elif node.type == syms.arglist:
# Argument list... loop through it looking for:
# Node(*, [*, Leaf(token.NAME, u"metaclass"), Leaf(token.EQUAL, u"="), Leaf(*, *)]
for child in node.children:
if results: break
if child.type == token.COMMA:
#Store the last comma, which precedes the metaclass
comma = child
elif type(child) == Node:
meta = equal = name = None
for arg in child.children:
if arg == Leaf(token.NAME, u"metaclass"):
#We have the (metaclass) part
meta = arg
elif meta and arg == Leaf(token.EQUAL, u"="):
#We have the (metaclass=) part
equal = arg
elif meta and equal:
#Here we go, we have (metaclass=X)
name = arg
results = (comma, meta, equal, name)
break
return results
class FixMetaclass(fixer_base.BaseFix):
PATTERN = u"""
classdef<any*>
"""
def transform(self, node, results):
meta_results = has_metaclass(node)
if not meta_results: return
for meta in meta_results:
meta.remove()
target = Leaf(token.NAME, u"__metaclass__")
equal = Leaf(token.EQUAL, u"=", prefix=u" ")
# meta is the last item in what was returned by has_metaclass(): name
name = meta
name.prefix = u" "
stmt_node = Node(syms.atom, [target, equal, name])
suitify(node)
for item in node.children:
if item.type == syms.suite:
for stmt in item.children:
if stmt.type == token.INDENT:
# Insert, in reverse order, the statement, a newline,
# and an indent right after the first indented line
loc = item.children.index(stmt) + 1
# Keep consistent indentation form
ident = Leaf(token.INDENT, stmt.value)
item.insert_child(loc, ident)
item.insert_child(loc, Newline())
item.insert_child(loc, stmt_node)
break
| bsd-3-clause |
mcgachey/edx-platform | cms/djangoapps/contentstore/views/tests/test_videos.py | 83 | 15190 | #-*- coding: utf-8 -*-
"""
Unit tests for video-related REST APIs.
"""
# pylint: disable=attribute-defined-outside-init
import csv
import json
import dateutil.parser
import re
from StringIO import StringIO
from django.conf import settings
from django.test.utils import override_settings
from mock import Mock, patch
from edxval.api import create_profile, create_video, get_video_info
from contentstore.models import VideoUploadConfig
from contentstore.views.videos import KEY_EXPIRATION_IN_SECONDS, StatusDisplayStrings
from contentstore.tests.utils import CourseTestCase
from contentstore.utils import reverse_course_url
from xmodule.modulestore.tests.factories import CourseFactory
class VideoUploadTestMixin(object):
"""
Test cases for the video upload feature
"""
def get_url_for_course_key(self, course_key):
"""Return video handler URL for the given course"""
return reverse_course_url(self.VIEW_NAME, course_key)
def setUp(self):
super(VideoUploadTestMixin, self).setUp()
self.url = self.get_url_for_course_key(self.course.id)
self.test_token = "test_token"
self.course.video_upload_pipeline = {
"course_video_upload_token": self.test_token,
}
self.save_course()
self.profiles = ["profile1", "profile2"]
self.previous_uploads = [
{
"edx_video_id": "test1",
"client_video_id": "test1.mp4",
"duration": 42.0,
"status": "upload",
"courses": [unicode(self.course.id)],
"encoded_videos": [],
},
{
"edx_video_id": "test2",
"client_video_id": "test2.mp4",
"duration": 128.0,
"status": "file_complete",
"courses": [unicode(self.course.id)],
"encoded_videos": [
{
"profile": "profile1",
"url": "http://example.com/profile1/test2.mp4",
"file_size": 1600,
"bitrate": 100,
},
{
"profile": "profile2",
"url": "http://example.com/profile2/test2.mov",
"file_size": 16000,
"bitrate": 1000,
},
],
},
{
"edx_video_id": "non-ascii",
"client_video_id": u"nón-ascii-näme.mp4",
"duration": 256.0,
"status": "transcode_active",
"courses": [unicode(self.course.id)],
"encoded_videos": [
{
"profile": "profile1",
"url": u"http://example.com/profile1/nón-ascii-näme.mp4",
"file_size": 3200,
"bitrate": 100,
},
]
},
]
# Ensure every status string is tested
self.previous_uploads += [
{
"edx_video_id": "status_test_{}".format(status),
"client_video_id": "status_test.mp4",
"duration": 3.14,
"status": status,
"courses": [unicode(self.course.id)],
"encoded_videos": [],
}
for status in (
StatusDisplayStrings._STATUS_MAP.keys() + # pylint:disable=protected-access
["non_existent_status"]
)
]
for profile in self.profiles:
create_profile(profile)
for video in self.previous_uploads:
create_video(video)
def _get_previous_upload(self, edx_video_id):
"""Returns the previous upload with the given video id."""
return next(
video
for video in self.previous_uploads
if video["edx_video_id"] == edx_video_id
)
def test_anon_user(self):
self.client.logout()
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
def test_put(self):
response = self.client.put(self.url)
self.assertEqual(response.status_code, 405)
def test_invalid_course_key(self):
response = self.client.get(
self.get_url_for_course_key("Non/Existent/Course")
)
self.assertEqual(response.status_code, 404)
def test_non_staff_user(self):
client, __ = self.create_non_staff_authed_user_client()
response = client.get(self.url)
self.assertEqual(response.status_code, 403)
def test_video_pipeline_not_enabled(self):
settings.FEATURES["ENABLE_VIDEO_UPLOAD_PIPELINE"] = False
self.assertEqual(self.client.get(self.url).status_code, 404)
def test_video_pipeline_not_configured(self):
settings.VIDEO_UPLOAD_PIPELINE = None
self.assertEqual(self.client.get(self.url).status_code, 404)
def test_course_not_configured(self):
self.course.video_upload_pipeline = {}
self.save_course()
self.assertEqual(self.client.get(self.url).status_code, 404)
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_VIDEO_UPLOAD_PIPELINE": True})
@override_settings(VIDEO_UPLOAD_PIPELINE={"BUCKET": "test_bucket", "ROOT_PATH": "test_root"})
class VideosHandlerTestCase(VideoUploadTestMixin, CourseTestCase):
"""Test cases for the main video upload endpoint"""
VIEW_NAME = "videos_handler"
def test_get_json(self):
response = self.client.get_json(self.url)
self.assertEqual(response.status_code, 200)
response_videos = json.loads(response.content)["videos"]
self.assertEqual(len(response_videos), len(self.previous_uploads))
for i, response_video in enumerate(response_videos):
# Videos should be returned by creation date descending
original_video = self.previous_uploads[-(i + 1)]
self.assertEqual(
set(response_video.keys()),
set(["edx_video_id", "client_video_id", "created", "duration", "status"])
)
dateutil.parser.parse(response_video["created"])
for field in ["edx_video_id", "client_video_id", "duration"]:
self.assertEqual(response_video[field], original_video[field])
self.assertEqual(
response_video["status"],
StatusDisplayStrings.get(original_video["status"])
)
def test_get_html(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertRegexpMatches(response["Content-Type"], "^text/html(;.*)?$")
# Crude check for presence of data in returned HTML
for video in self.previous_uploads:
self.assertIn(video["edx_video_id"], response.content)
def test_post_non_json(self):
response = self.client.post(self.url, {"files": []})
self.assertEqual(response.status_code, 400)
def test_post_malformed_json(self):
response = self.client.post(self.url, "{", content_type="application/json")
self.assertEqual(response.status_code, 400)
def test_post_invalid_json(self):
def assert_bad(content):
"""Make request with content and assert that response is 400"""
response = self.client.post(
self.url,
json.dumps(content),
content_type="application/json"
)
self.assertEqual(response.status_code, 400)
# Top level missing files key
assert_bad({})
# Entry missing file_name
assert_bad({"files": [{"content_type": "video/mp4"}]})
# Entry missing content_type
assert_bad({"files": [{"file_name": "test.mp4"}]})
@override_settings(AWS_ACCESS_KEY_ID="test_key_id", AWS_SECRET_ACCESS_KEY="test_secret")
@patch("boto.s3.key.Key")
@patch("boto.s3.connection.S3Connection")
def test_post_success(self, mock_conn, mock_key):
files = [
{
"file_name": "first.mp4",
"content_type": "video/mp4",
},
{
"file_name": "second.webm",
"content_type": "video/webm",
},
{
"file_name": "third.mov",
"content_type": "video/quicktime",
},
{
"file_name": "fourth.mp4",
"content_type": "video/mp4",
},
]
bucket = Mock()
mock_conn.return_value = Mock(get_bucket=Mock(return_value=bucket))
mock_key_instances = [
Mock(
generate_url=Mock(
return_value="http://example.com/url_{}".format(file_info["file_name"])
)
)
for file_info in files
]
# If extra calls are made, return a dummy
mock_key.side_effect = mock_key_instances + [Mock()]
response = self.client.post(
self.url,
json.dumps({"files": files}),
content_type="application/json"
)
self.assertEqual(response.status_code, 200)
response_obj = json.loads(response.content)
mock_conn.assert_called_once_with(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
self.assertEqual(len(response_obj["files"]), len(files))
self.assertEqual(mock_key.call_count, len(files))
for i, file_info in enumerate(files):
# Ensure Key was set up correctly and extract id
key_call_args, __ = mock_key.call_args_list[i]
self.assertEqual(key_call_args[0], bucket)
path_match = re.match(
(
settings.VIDEO_UPLOAD_PIPELINE["ROOT_PATH"] +
"/([a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12})$"
),
key_call_args[1]
)
self.assertIsNotNone(path_match)
video_id = path_match.group(1)
mock_key_instance = mock_key_instances[i]
mock_key_instance.set_metadata.assert_any_call(
"course_video_upload_token",
self.test_token
)
mock_key_instance.set_metadata.assert_any_call(
"client_video_id",
file_info["file_name"]
)
mock_key_instance.set_metadata.assert_any_call("course_key", unicode(self.course.id))
mock_key_instance.generate_url.assert_called_once_with(
KEY_EXPIRATION_IN_SECONDS,
"PUT",
headers={"Content-Type": file_info["content_type"]}
)
# Ensure VAL was updated
val_info = get_video_info(video_id)
self.assertEqual(val_info["status"], "upload")
self.assertEqual(val_info["client_video_id"], file_info["file_name"])
self.assertEqual(val_info["status"], "upload")
self.assertEqual(val_info["duration"], 0)
self.assertEqual(val_info["courses"], [unicode(self.course.id)])
# Ensure response is correct
response_file = response_obj["files"][i]
self.assertEqual(response_file["file_name"], file_info["file_name"])
self.assertEqual(response_file["upload_url"], mock_key_instance.generate_url())
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_VIDEO_UPLOAD_PIPELINE": True})
@override_settings(VIDEO_UPLOAD_PIPELINE={"BUCKET": "test_bucket", "ROOT_PATH": "test_root"})
class VideoUrlsCsvTestCase(VideoUploadTestMixin, CourseTestCase):
"""Test cases for the CSV download endpoint for video uploads"""
VIEW_NAME = "video_encodings_download"
def setUp(self):
super(VideoUrlsCsvTestCase, self).setUp()
VideoUploadConfig(profile_whitelist="profile1").save()
def _check_csv_response(self, expected_profiles):
"""
Check that the response is a valid CSV response containing rows
corresponding to previous_uploads and including the expected profiles.
"""
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response["Content-Disposition"],
"attachment; filename={course}_video_urls.csv".format(course=self.course.id.course)
)
response_reader = StringIO(response.content)
reader = csv.DictReader(response_reader, dialect=csv.excel)
self.assertEqual(
reader.fieldnames,
(
["Name", "Duration", "Date Added", "Video ID", "Status"] +
["{} URL".format(profile) for profile in expected_profiles]
)
)
rows = list(reader)
self.assertEqual(len(rows), len(self.previous_uploads))
for i, row in enumerate(rows):
response_video = {
key.decode("utf-8"): value.decode("utf-8") for key, value in row.items()
}
# Videos should be returned by creation date descending
original_video = self.previous_uploads[-(i + 1)]
self.assertEqual(response_video["Name"], original_video["client_video_id"])
self.assertEqual(response_video["Duration"], str(original_video["duration"]))
dateutil.parser.parse(response_video["Date Added"])
self.assertEqual(response_video["Video ID"], original_video["edx_video_id"])
self.assertEqual(response_video["Status"], StatusDisplayStrings.get(original_video["status"]))
for profile in expected_profiles:
response_profile_url = response_video["{} URL".format(profile)]
original_encoded_for_profile = next(
(
original_encoded
for original_encoded in original_video["encoded_videos"]
if original_encoded["profile"] == profile
),
None
)
if original_encoded_for_profile:
self.assertEqual(response_profile_url, original_encoded_for_profile["url"])
else:
self.assertEqual(response_profile_url, "")
def test_basic(self):
self._check_csv_response(["profile1"])
def test_profile_whitelist(self):
VideoUploadConfig(profile_whitelist="profile1,profile2").save()
self._check_csv_response(["profile1", "profile2"])
def test_non_ascii_course(self):
course = CourseFactory.create(
number=u"nón-äscii",
video_upload_pipeline={
"course_video_upload_token": self.test_token,
}
)
response = self.client.get(self.get_url_for_course_key(course.id))
self.assertEqual(response.status_code, 200)
self.assertEqual(
response["Content-Disposition"],
"attachment; filename=video_urls.csv; filename*=utf-8''n%C3%B3n-%C3%A4scii_video_urls.csv"
)
| agpl-3.0 |
rickshawman/twitter | venv/lib/python2.7/site-packages/pip/utils/build.py | 899 | 1312 | from __future__ import absolute_import
import os.path
import tempfile
from pip.utils import rmtree
class BuildDirectory(object):
def __init__(self, name=None, delete=None):
# If we were not given an explicit directory, and we were not given an
# explicit delete option, then we'll default to deleting.
if name is None and delete is None:
delete = True
if name is None:
# We realpath here because some systems have their default tmpdir
# symlinked to another directory. This tends to confuse build
# scripts, so we canonicalize the path by traversing potential
# symlinks here.
name = os.path.realpath(tempfile.mkdtemp(prefix="pip-build-"))
self.name = name
self.delete = delete
def __repr__(self):
return "<{} {!r}>".format(self.__class__.__name__, self.name)
def __enter__(self):
return self.name
def __exit__(self, exc, value, tb):
self.cleanup()
def cleanup(self):
if self.delete:
rmtree(self.name)
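# A minimal usage sketch (illustrative only, not part of pip's public API):
# running this module directly shows the lifecycle. A temp dir is created,
# handed to the ``with`` block, and removed on exit because ``delete``
# defaulted to True.
if __name__ == "__main__":
    with BuildDirectory() as build_dir:
        print("building in %s" % build_dir)
    # at this point build_dir has been removed by cleanup()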
| gpl-3.0 |
sysalexis/kbengine | kbe/res/scripts/common/Lib/tkinter/test/test_tkinter/test_loadtk.py | 162 | 1503 | import os
import sys
import unittest
import test.support as test_support
from tkinter import Tcl, TclError
test_support.requires('gui')
class TkLoadTest(unittest.TestCase):
@unittest.skipIf('DISPLAY' not in os.environ, 'No $DISPLAY set.')
def testLoadTk(self):
tcl = Tcl()
self.assertRaises(TclError,tcl.winfo_geometry)
tcl.loadtk()
self.assertEqual('1x1+0+0', tcl.winfo_geometry())
tcl.destroy()
def testLoadTkFailure(self):
old_display = None
if sys.platform.startswith(('win', 'darwin', 'cygwin')):
# no failure possible on windows?
# XXX Maybe on tk older than 8.4.13 it would be possible,
# see tkinter.h.
return
with test_support.EnvironmentVarGuard() as env:
if 'DISPLAY' in os.environ:
del env['DISPLAY']
# on some platforms, deleting environment variables
# doesn't actually carry through to the process level
# because they don't support unsetenv
# If that's the case, abort.
with os.popen('echo $DISPLAY') as pipe:
display = pipe.read().strip()
if display:
return
tcl = Tcl()
self.assertRaises(TclError, tcl.winfo_geometry)
self.assertRaises(TclError, tcl.loadtk)
tests_gui = (TkLoadTest, )
if __name__ == "__main__":
test_support.run_unittest(*tests_gui)
| lgpl-3.0 |
Trafire/purchaseorders | lib/python2.7/site-packages/Flask-0.11.1-py2.7.egg/flask/json.py | 48 | 9261 | # -*- coding: utf-8 -*-
"""
flask.json
~~~~~~~~~~
Implementation helpers for the JSON support in Flask.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import io
import uuid
from datetime import date
from .globals import current_app, request
from ._compat import text_type, PY2
from werkzeug.http import http_date
from jinja2 import Markup
# Use the same json implementation as itsdangerous on which we
# depend anyways.
try:
from itsdangerous import simplejson as _json
except ImportError:
from itsdangerous import json as _json
# Figure out if simplejson escapes slashes. This behavior was changed
# from one version to another without reason.
_slash_escape = '\\/' not in _json.dumps('/')
__all__ = ['dump', 'dumps', 'load', 'loads', 'htmlsafe_dump',
'htmlsafe_dumps', 'JSONDecoder', 'JSONEncoder',
'jsonify']
def _wrap_reader_for_text(fp, encoding):
if isinstance(fp.read(0), bytes):
fp = io.TextIOWrapper(io.BufferedReader(fp), encoding)
return fp
def _wrap_writer_for_text(fp, encoding):
try:
fp.write('')
except TypeError:
fp = io.TextIOWrapper(fp, encoding)
return fp
class JSONEncoder(_json.JSONEncoder):
"""The default Flask JSON encoder. This one extends the default simplejson
encoder by also supporting ``datetime`` objects, ``UUID`` as well as
``Markup`` objects which are serialized as RFC 822 datetime strings (same
as the HTTP date format). In order to support more data types override the
:meth:`default` method.
"""
def default(self, o):
"""Implement this method in a subclass such that it returns a
serializable object for ``o``, or calls the base implementation (to
raise a :exc:`TypeError`).
For example, to support arbitrary iterators, you could implement
default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
if isinstance(o, date):
return http_date(o.timetuple())
if isinstance(o, uuid.UUID):
return str(o)
if hasattr(o, '__html__'):
return text_type(o.__html__())
return _json.JSONEncoder.default(self, o)
class JSONDecoder(_json.JSONDecoder):
"""The default JSON decoder. This one does not change the behavior from
the default simplejson decoder. Consult the :mod:`json` documentation
for more information. This decoder is not only used for the load
functions of this module but also :attr:`~flask.Request`.
"""
def _dump_arg_defaults(kwargs):
"""Inject default arguments for dump functions."""
if current_app:
kwargs.setdefault('cls', current_app.json_encoder)
if not current_app.config['JSON_AS_ASCII']:
kwargs.setdefault('ensure_ascii', False)
kwargs.setdefault('sort_keys', current_app.config['JSON_SORT_KEYS'])
else:
kwargs.setdefault('sort_keys', True)
kwargs.setdefault('cls', JSONEncoder)
def _load_arg_defaults(kwargs):
"""Inject default arguments for load functions."""
if current_app:
kwargs.setdefault('cls', current_app.json_decoder)
else:
kwargs.setdefault('cls', JSONDecoder)
def dumps(obj, **kwargs):
"""Serialize ``obj`` to a JSON formatted ``str`` by using the application's
configured encoder (:attr:`~flask.Flask.json_encoder`) if there is an
application on the stack.
This function can return ``unicode`` strings or ascii-only bytestrings by
default which coerce into unicode strings automatically. That behavior by
default is controlled by the ``JSON_AS_ASCII`` configuration variable
and can be overridden by the simplejson ``ensure_ascii`` parameter.
"""
_dump_arg_defaults(kwargs)
encoding = kwargs.pop('encoding', None)
rv = _json.dumps(obj, **kwargs)
if encoding is not None and isinstance(rv, text_type):
rv = rv.encode(encoding)
return rv
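# A minimal sketch of the ASCII behavior described above (module-level
# defaults apply outside an application context; exact reprs illustrative):
#
#   dumps({u"name": u"Álvaro"})
#   # -> '{"name": "\u00c1lvaro"}'   (ascii-safe output by default)
#   dumps({u"name": u"Álvaro"}, ensure_ascii=False)
#   # -> u'{"name": "Álvaro"}'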
def dump(obj, fp, **kwargs):
"""Like :func:`dumps` but writes into a file object."""
_dump_arg_defaults(kwargs)
encoding = kwargs.pop('encoding', None)
if encoding is not None:
fp = _wrap_writer_for_text(fp, encoding)
_json.dump(obj, fp, **kwargs)
def loads(s, **kwargs):
"""Unserialize a JSON object from a string ``s`` by using the application's
configured decoder (:attr:`~flask.Flask.json_decoder`) if there is an
application on the stack.
"""
_load_arg_defaults(kwargs)
if isinstance(s, bytes):
s = s.decode(kwargs.pop('encoding', None) or 'utf-8')
return _json.loads(s, **kwargs)
def load(fp, **kwargs):
"""Like :func:`loads` but reads from a file object.
"""
_load_arg_defaults(kwargs)
if not PY2:
fp = _wrap_reader_for_text(fp, kwargs.pop('encoding', None) or 'utf-8')
return _json.load(fp, **kwargs)
def htmlsafe_dumps(obj, **kwargs):
"""Works exactly like :func:`dumps` but is safe for use in ``<script>``
tags. It accepts the same arguments and returns a JSON string. Note that
this is available in templates through the ``|tojson`` filter which will
also mark the result as safe. Due to how this function escapes certain
characters this is safe even if used outside of ``<script>`` tags.
The following characters are escaped in strings:
- ``<``
- ``>``
- ``&``
- ``'``
This makes it safe to embed such strings in any place in HTML with the
notable exception of double quoted attributes. In that case single
quote your attributes or HTML escape it in addition.
.. versionchanged:: 0.10
This function's return value is now always safe for HTML usage, even
if outside of script tags or if used in XHTML. This rule does not
hold true when using this function in HTML attributes that are double
quoted. Always single quote attributes if you use the ``|tojson``
filter. Alternatively use ``|tojson|forceescape``.
"""
rv = dumps(obj, **kwargs) \
.replace(u'<', u'\\u003c') \
.replace(u'>', u'\\u003e') \
.replace(u'&', u'\\u0026') \
.replace(u"'", u'\\u0027')
if not _slash_escape:
rv = rv.replace('\\/', '/')
return rv
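# For instance (illustrative output, default settings assumed):
#
#   htmlsafe_dumps(u"</script>")
#   # -> u'"\u003c/script\u003e"'   (safe to embed inside <script> blocks)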
def htmlsafe_dump(obj, fp, **kwargs):
"""Like :func:`htmlsafe_dumps` but writes into a file object."""
fp.write(text_type(htmlsafe_dumps(obj, **kwargs)))
def jsonify(*args, **kwargs):
"""This function wraps :func:`dumps` to add a few enhancements that make
life easier. It turns the JSON output into a :class:`~flask.Response`
object with the :mimetype:`application/json` mimetype. For convenience, it
also converts multiple arguments into an array or multiple keyword arguments
into a dict. This means that both ``jsonify(1,2,3)`` and
``jsonify([1,2,3])`` serialize to ``[1,2,3]``.
For clarity, the JSON serialization behavior has the following differences
from :func:`dumps`:
1. Single argument: Passed straight through to :func:`dumps`.
2. Multiple arguments: Converted to an array before being passed to
:func:`dumps`.
3. Multiple keyword arguments: Converted to a dict before being passed to
:func:`dumps`.
4. Both args and kwargs: Behavior undefined and will throw an exception.
Example usage::
from flask import jsonify
@app.route('/_get_current_user')
def get_current_user():
return jsonify(username=g.user.username,
email=g.user.email,
id=g.user.id)
This will send a JSON response like this to the browser::
{
"username": "admin",
"email": "admin@localhost",
"id": 42
}
.. versionchanged:: 0.11
Added support for serializing top-level arrays. This introduces a
security risk in ancient browsers. See :ref:`json-security` for details.
To simplify debugging, this function's response will be pretty printed
unless the request was made with ``X-Requested-With: XMLHttpRequest`` or
the ``JSONIFY_PRETTYPRINT_REGULAR`` config parameter is set to false.
Compressed (not pretty) formatting currently means no indents and no
spaces after separators.
.. versionadded:: 0.2
"""
indent = None
separators = (',', ':')
if current_app.config['JSONIFY_PRETTYPRINT_REGULAR'] and not request.is_xhr:
indent = 2
separators = (', ', ': ')
if args and kwargs:
raise TypeError('jsonify() behavior undefined when passed both args and kwargs')
elif len(args) == 1: # single args are passed directly to dumps()
data = args[0]
else:
data = args or kwargs
return current_app.response_class(
(dumps(data, indent=indent, separators=separators), '\n'),
mimetype=current_app.config['JSONIFY_MIMETYPE']
)
def tojson_filter(obj, **kwargs):
return Markup(htmlsafe_dumps(obj, **kwargs))
| mit |
turicas/rows | tests/tests_fields.py | 1 | 23936 | # coding: utf-8
# Copyright 2014-2020 Álvaro Justen <https://github.com/turicas/rows/>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import datetime
import json
import platform
import unittest
import uuid
from base64 import b64encode
from decimal import Decimal
import six
import rows
from rows import fields
if platform.system() == "Windows":
locale_name = "ptb_bra"
else:
locale_name = "pt_BR.UTF-8"
class FieldsTestCase(unittest.TestCase):
def test_Field(self):
self.assertEqual(fields.Field.TYPE, (type(None),))
self.assertIs(fields.Field.deserialize(None), None)
self.assertEqual(fields.Field.deserialize("Álvaro"), "Álvaro")
self.assertEqual(fields.Field.serialize(None), "")
self.assertIs(type(fields.Field.serialize(None)), six.text_type)
self.assertEqual(fields.Field.serialize("Álvaro"), "Álvaro")
self.assertIs(type(fields.Field.serialize("Álvaro")), six.text_type)
def test_BinaryField(self):
deserialized = "Álvaro".encode("utf-8")
serialized = b64encode(deserialized).decode("ascii")
self.assertEqual(type(deserialized), six.binary_type)
self.assertEqual(type(serialized), six.text_type)
self.assertEqual(fields.BinaryField.TYPE, (bytes,))
self.assertEqual(fields.BinaryField.serialize(None), "")
self.assertIs(type(fields.BinaryField.serialize(None)), six.text_type)
self.assertEqual(fields.BinaryField.serialize(deserialized), serialized)
self.assertIs(type(fields.BinaryField.serialize(deserialized)), six.text_type)
with self.assertRaises(ValueError):
fields.BinaryField.serialize(42)
with self.assertRaises(ValueError):
fields.BinaryField.serialize(3.14)
with self.assertRaises(ValueError):
fields.BinaryField.serialize("Álvaro")
with self.assertRaises(ValueError):
fields.BinaryField.serialize("123")
self.assertIs(fields.BinaryField.deserialize(None), b"")
self.assertEqual(fields.BinaryField.deserialize(serialized), deserialized)
self.assertIs(type(fields.BinaryField.deserialize(serialized)), six.binary_type)
with self.assertRaises(ValueError):
fields.BinaryField.deserialize(42)
with self.assertRaises(ValueError):
fields.BinaryField.deserialize(3.14)
with self.assertRaises(ValueError):
fields.BinaryField.deserialize("Álvaro")
self.assertEqual(fields.BinaryField.deserialize(deserialized), deserialized)
self.assertEqual(fields.BinaryField.deserialize(serialized), deserialized)
self.assertEqual(
fields.BinaryField.deserialize(serialized.encode("ascii")),
serialized.encode("ascii"),
)
def test_BoolField(self):
self.assertEqual(fields.BoolField.TYPE, (bool,))
self.assertEqual(fields.BoolField.serialize(None), "")
false_values = ("False", "false", "no", False)
for value in false_values:
self.assertIs(fields.BoolField.deserialize(value), False)
self.assertIs(fields.BoolField.deserialize(None), None)
self.assertEqual(fields.BoolField.deserialize(""), None)
true_values = ("True", "true", "yes", True)
for value in true_values:
self.assertIs(fields.BoolField.deserialize(value), True)
self.assertEqual(fields.BoolField.serialize(False), "false")
self.assertIs(type(fields.BoolField.serialize(False)), six.text_type)
self.assertEqual(fields.BoolField.serialize(True), "true")
self.assertIs(type(fields.BoolField.serialize(True)), six.text_type)
# '0' and '1' should not be accepted as boolean values, because the
# sample might simply not contain other integers while the actual
# column type is integer
with self.assertRaises(ValueError):
fields.BoolField.deserialize("0")
with self.assertRaises(ValueError):
fields.BoolField.deserialize(b"0")
with self.assertRaises(ValueError):
fields.BoolField.deserialize("1")
with self.assertRaises(ValueError):
fields.BoolField.deserialize(b"1")
def test_IntegerField(self):
self.assertEqual(fields.IntegerField.TYPE, (int,))
self.assertEqual(fields.IntegerField.serialize(None), "")
self.assertIs(type(fields.IntegerField.serialize(None)), six.text_type)
self.assertIn(
type(fields.IntegerField.deserialize("42")), fields.IntegerField.TYPE
)
self.assertEqual(fields.IntegerField.deserialize("42"), 42)
self.assertEqual(fields.IntegerField.deserialize(42), 42)
self.assertEqual(fields.IntegerField.serialize(42), "42")
self.assertIs(type(fields.IntegerField.serialize(42)), six.text_type)
self.assertEqual(fields.IntegerField.deserialize(None), None)
self.assertEqual(
fields.IntegerField.deserialize("10152709355006317"), 10152709355006317
)
with rows.locale_context(locale_name):
self.assertEqual(fields.IntegerField.serialize(42000), "42000")
self.assertIs(type(fields.IntegerField.serialize(42000)), six.text_type)
self.assertEqual(
fields.IntegerField.serialize(42000, grouping=True), "42.000"
)
self.assertEqual(fields.IntegerField.deserialize("42.000"), 42000)
self.assertEqual(fields.IntegerField.deserialize(42), 42)
self.assertEqual(fields.IntegerField.deserialize(42.0), 42)
with self.assertRaises(ValueError):
fields.IntegerField.deserialize(1.23)
with self.assertRaises(ValueError):
fields.IntegerField.deserialize("013")
self.assertEqual(fields.IntegerField.deserialize("0"), 0)
def test_FloatField(self):
self.assertEqual(fields.FloatField.TYPE, (float,))
self.assertEqual(fields.FloatField.serialize(None), "")
self.assertIs(type(fields.FloatField.serialize(None)), six.text_type)
self.assertIn(
type(fields.FloatField.deserialize("42.0")), fields.FloatField.TYPE
)
self.assertEqual(fields.FloatField.deserialize("42.0"), 42.0)
self.assertEqual(fields.FloatField.deserialize(42.0), 42.0)
self.assertEqual(fields.FloatField.deserialize(42), 42.0)
self.assertEqual(fields.FloatField.deserialize(None), None)
self.assertEqual(fields.FloatField.serialize(42.0), "42.0")
self.assertIs(type(fields.FloatField.serialize(42.0)), six.text_type)
with rows.locale_context(locale_name):
self.assertEqual(fields.FloatField.serialize(42000.0), "42000,000000")
self.assertIs(type(fields.FloatField.serialize(42000.0)), six.text_type)
self.assertEqual(
fields.FloatField.serialize(42000, grouping=True), "42.000,000000"
)
self.assertEqual(fields.FloatField.deserialize("42.000,00"), 42000.0)
self.assertEqual(fields.FloatField.deserialize(42), 42.0)
self.assertEqual(fields.FloatField.deserialize(42.0), 42.0)
def test_DecimalField(self):
deserialized = Decimal("42.010")
self.assertEqual(fields.DecimalField.TYPE, (Decimal,))
self.assertEqual(fields.DecimalField.serialize(None), "")
self.assertIs(type(fields.DecimalField.serialize(None)), six.text_type)
self.assertEqual(fields.DecimalField.deserialize(""), None)
self.assertIn(
type(fields.DecimalField.deserialize("42.0")), fields.DecimalField.TYPE
)
self.assertEqual(fields.DecimalField.deserialize("42.0"), Decimal("42.0"))
self.assertEqual(fields.DecimalField.deserialize(deserialized), deserialized)
self.assertEqual(fields.DecimalField.serialize(deserialized), "42.010")
self.assertEqual(
type(fields.DecimalField.serialize(deserialized)), six.text_type
)
self.assertEqual(
fields.DecimalField.deserialize("21.21657469231"), Decimal("21.21657469231")
)
self.assertEqual(fields.DecimalField.deserialize("-21.34"), Decimal("-21.34"))
self.assertEqual(fields.DecimalField.serialize(Decimal("-21.34")), "-21.34")
self.assertEqual(fields.DecimalField.deserialize(None), None)
with rows.locale_context(locale_name):
self.assertEqual(
six.text_type, type(fields.DecimalField.serialize(deserialized))
)
self.assertEqual(fields.DecimalField.serialize(Decimal("4200")), "4200")
self.assertEqual(fields.DecimalField.serialize(Decimal("42.0")), "42,0")
self.assertEqual(
fields.DecimalField.serialize(Decimal("42000.0")), "42000,0"
)
self.assertEqual(fields.DecimalField.serialize(Decimal("-42.0")), "-42,0")
self.assertEqual(
fields.DecimalField.deserialize("42.000,00"), Decimal("42000.00")
)
self.assertEqual(
fields.DecimalField.deserialize("-42.000,00"), Decimal("-42000.00")
)
self.assertEqual(
fields.DecimalField.serialize(Decimal("42000.0"), grouping=True),
"42.000,0",
)
self.assertEqual(fields.DecimalField.deserialize(42000), Decimal("42000"))
self.assertEqual(fields.DecimalField.deserialize(42000.0), Decimal("42000"))
def test_PercentField(self):
deserialized = Decimal("0.42010")
self.assertEqual(fields.PercentField.TYPE, (Decimal,))
self.assertIn(
type(fields.PercentField.deserialize("42.0%")), fields.PercentField.TYPE
)
self.assertEqual(fields.PercentField.deserialize("42.0%"), Decimal("0.420"))
self.assertEqual(
fields.PercentField.deserialize(Decimal("0.420")), Decimal("0.420")
)
self.assertEqual(fields.PercentField.deserialize(deserialized), deserialized)
self.assertEqual(fields.PercentField.deserialize(None), None)
self.assertEqual(fields.PercentField.serialize(deserialized), "42.010%")
self.assertEqual(
type(fields.PercentField.serialize(deserialized)), six.text_type
)
self.assertEqual(fields.PercentField.serialize(Decimal("42.010")), "4201.0%")
self.assertEqual(fields.PercentField.serialize(Decimal("0")), "0.00%")
self.assertEqual(fields.PercentField.serialize(None), "")
self.assertEqual(fields.PercentField.serialize(Decimal("0.01")), "1%")
with rows.locale_context(locale_name):
self.assertEqual(
type(fields.PercentField.serialize(deserialized)), six.text_type
)
self.assertEqual(fields.PercentField.serialize(Decimal("42.0")), "4200%")
self.assertEqual(
fields.PercentField.serialize(Decimal("42000.0")), "4200000%"
)
self.assertEqual(
fields.PercentField.deserialize("42.000,00%"), Decimal("420.0000")
)
self.assertEqual(
fields.PercentField.serialize(Decimal("42000.00"), grouping=True),
"4.200.000%",
)
with self.assertRaises(ValueError):
fields.PercentField.deserialize(42)
def test_DateField(self):
# TODO: test timezone-aware datetime.date
serialized = "2015-05-27"
deserialized = datetime.date(2015, 5, 27)
self.assertEqual(fields.DateField.TYPE, (datetime.date,))
self.assertEqual(fields.DateField.serialize(None), "")
self.assertIs(type(fields.DateField.serialize(None)), six.text_type)
self.assertIn(
type(fields.DateField.deserialize(serialized)), fields.DateField.TYPE
)
self.assertEqual(fields.DateField.deserialize(serialized), deserialized)
self.assertEqual(fields.DateField.deserialize(deserialized), deserialized)
self.assertEqual(fields.DateField.deserialize(None), None)
self.assertEqual(fields.DateField.deserialize(""), None)
self.assertEqual(fields.DateField.serialize(deserialized), serialized)
self.assertIs(type(fields.DateField.serialize(deserialized)), six.text_type)
with self.assertRaises(ValueError):
fields.DateField.deserialize(42)
with self.assertRaises(ValueError):
fields.DateField.deserialize(serialized + "T00:00:00")
with self.assertRaises(ValueError):
fields.DateField.deserialize("Álvaro")
with self.assertRaises(ValueError):
fields.DateField.deserialize(serialized.encode("utf-8"))
def test_DatetimeField(self):
# TODO: test timezone-aware datetime.date
serialized = "2015-05-27T01:02:03"
self.assertEqual(fields.DatetimeField.TYPE, (datetime.datetime,))
deserialized = fields.DatetimeField.deserialize(serialized)
self.assertIn(type(deserialized), fields.DatetimeField.TYPE)
self.assertEqual(fields.DatetimeField.serialize(None), "")
self.assertIs(type(fields.DatetimeField.serialize(None)), six.text_type)
value = datetime.datetime(2015, 5, 27, 1, 2, 3)
self.assertEqual(fields.DatetimeField.deserialize(serialized), value)
self.assertEqual(fields.DatetimeField.deserialize(deserialized), deserialized)
self.assertEqual(fields.DatetimeField.deserialize(None), None)
self.assertEqual(fields.DatetimeField.serialize(value), serialized)
self.assertIs(type(fields.DatetimeField.serialize(value)), six.text_type)
with self.assertRaises(ValueError):
fields.DatetimeField.deserialize(42)
with self.assertRaises(ValueError):
fields.DatetimeField.deserialize("2015-01-01")
with self.assertRaises(ValueError):
fields.DatetimeField.deserialize("Álvaro")
with self.assertRaises(ValueError):
fields.DatetimeField.deserialize(serialized.encode("utf-8"))
def test_EmailField(self):
# TODO: accept spaces also
serialized = "test@domain.com"
self.assertEqual(fields.EmailField.TYPE, (six.text_type,))
deserialized = fields.EmailField.deserialize(serialized)
self.assertIn(type(deserialized), fields.EmailField.TYPE)
self.assertEqual(fields.EmailField.serialize(None), "")
self.assertIs(type(fields.EmailField.serialize(None)), six.text_type)
self.assertEqual(fields.EmailField.serialize(serialized), serialized)
self.assertEqual(fields.EmailField.deserialize(serialized), serialized)
self.assertEqual(fields.EmailField.deserialize(None), None)
self.assertEqual(fields.EmailField.deserialize(""), None)
self.assertIs(type(fields.EmailField.serialize(serialized)), six.text_type)
with self.assertRaises(ValueError):
fields.EmailField.deserialize(42)
with self.assertRaises(ValueError):
fields.EmailField.deserialize("2015-01-01")
with self.assertRaises(ValueError):
fields.EmailField.deserialize("Álvaro")
with self.assertRaises(ValueError):
fields.EmailField.deserialize("test@example.com".encode("utf-8"))
def test_TextField(self):
self.assertEqual(fields.TextField.TYPE, (six.text_type,))
self.assertEqual(fields.TextField.serialize(None), "")
self.assertIs(type(fields.TextField.serialize(None)), six.text_type)
self.assertIn(type(fields.TextField.deserialize("test")), fields.TextField.TYPE)
self.assertEqual(fields.TextField.deserialize("Álvaro"), "Álvaro")
self.assertIs(fields.TextField.deserialize(None), None)
self.assertIs(fields.TextField.deserialize(""), "")
self.assertEqual(fields.TextField.serialize("Álvaro"), "Álvaro")
self.assertIs(type(fields.TextField.serialize("Álvaro")), six.text_type)
with self.assertRaises(ValueError) as exception_context:
fields.TextField.deserialize("Álvaro".encode("utf-8"))
self.assertEqual(exception_context.exception.args[0], "Binary is not supported")
def test_JSONField(self):
self.assertEqual(fields.JSONField.TYPE, (list, dict))
self.assertEqual(type(fields.JSONField.deserialize("[]")), list)
self.assertEqual(type(fields.JSONField.deserialize("{}")), dict)
deserialized = {"a": 123, "b": 3.14, "c": [42, 24]}
serialized = json.dumps(deserialized)
self.assertEqual(fields.JSONField.deserialize(serialized), deserialized)
def test_UUIDField(self):
with self.assertRaises(ValueError) as exception_context:
fields.UUIDField.deserialize("not an UUID value")
with self.assertRaises(ValueError) as exception_context:
# "z" not hex
fields.UUIDField.deserialize("z" * 32)
fields.UUIDField.deserialize("a" * 32) # no exception should be raised
data = uuid.uuid4()
assert fields.UUIDField.deserialize(data) == data
assert fields.UUIDField.deserialize(str(data)) == data
assert fields.UUIDField.deserialize(str(data).replace("-", "")) == data
class FieldUtilsTestCase(unittest.TestCase):
maxDiff = None
def setUp(self):
with open("tests/data/all-field-types.csv", "rb") as fobj:
data = fobj.read().decode("utf-8")
lines = [line.split(",") for line in data.splitlines()]
self.fields = lines[0]
self.data = lines[1:]
self.expected = {
"bool_column": fields.BoolField,
"integer_column": fields.IntegerField,
"float_column": fields.FloatField,
"decimal_column": fields.FloatField,
"percent_column": fields.PercentField,
"date_column": fields.DateField,
"datetime_column": fields.DatetimeField,
"unicode_column": fields.TextField,
}
def test_slug(self):
self.assertEqual(fields.slug(None), "")
self.assertEqual(fields.slug("Álvaro Justen"), "alvaro_justen")
self.assertEqual(fields.slug("Moe's Bar"), "moe_s_bar")
self.assertEqual(fields.slug("-----te-----st------"), "te_st")
self.assertEqual(fields.slug("first line\nsecond line"), "first_line_second_line")
self.assertEqual(fields.slug("first/second"), "first_second")
self.assertEqual(fields.slug("first\xa0second"), "first_second")
# As in <https://github.com/turicas/rows/issues/179>
self.assertEqual(
fields.slug('Query Occurrence"( % ),"First Seen'),
"query_occurrence_first_seen",
)
self.assertEqual(fields.slug(" ÁLVARO justen% "), "alvaro_justen")
self.assertEqual(fields.slug(42), "42")
self.assertEqual(fields.slug("^test"), "test")
self.assertEqual(fields.slug("^test", permitted_chars=fields.SLUG_CHARS + "^"), "^test")
self.assertEqual(fields.slug("this/is\ta\ntest", separator="-"), "this-is-a-test")
def test_detect_types_no_sample(self):
expected = {key: fields.TextField for key in self.expected.keys()}
result = fields.detect_types(self.fields, [])
self.assertDictEqual(dict(result), expected)
def test_detect_types_binary(self):
# first, try values as (`bytes`/`str`)
expected = {key: fields.BinaryField for key in self.expected.keys()}
values = [
[b"some binary data" for _ in range(len(self.data[0]))] for __ in range(20)
]
result = fields.detect_types(self.fields, values)
self.assertDictEqual(dict(result), expected)
# second, try base64-encoded values (as `str`/`unicode`)
expected = {key: fields.TextField for key in self.expected.keys()}
values = [
[b64encode(value.encode("utf-8")).decode("ascii") for value in row]
for row in self.data
]
result = fields.detect_types(self.fields, values)
self.assertDictEqual(dict(result), expected)
def test_detect_types(self):
result = fields.detect_types(self.fields, self.data)
self.assertDictEqual(dict(result), self.expected)
def test_detect_types_different_number_of_fields(self):
result = fields.detect_types(["f1", "f2"], [["a", "b", "c"]])
self.assertEqual(list(result.keys()), ["f1", "f2", "field_2"])
def test_empty_sequences_should_not_be_bool(self):
result = fields.detect_types(["field_1"], [[""], [""]])["field_1"]
expected = fields.TextField
self.assertEqual(result, expected)
def test_precedence(self):
field_types = [
("bool", fields.BoolField),
("integer", fields.IntegerField),
("float", fields.FloatField),
("datetime", fields.DatetimeField),
("date", fields.DateField),
("float", fields.FloatField),
("percent", fields.PercentField),
("json", fields.JSONField),
("email", fields.EmailField),
("binary1", fields.BinaryField),
("binary2", fields.BinaryField),
("text", fields.TextField),
]
data = [
[
"false",
"42",
"3.14",
"2016-08-15T05:21:10",
"2016-08-15",
"2.71",
"76.38%",
'{"key": "value"}',
"test@example.com",
b"cHl0aG9uIHJ1bGVz",
b"python rules",
"Álvaro Justen",
]
]
result = fields.detect_types(
[item[0] for item in field_types],
data,
field_types=[item[1] for item in field_types],
)
self.assertDictEqual(dict(result), dict(field_types))
class FieldsFunctionsTestCase(unittest.TestCase):
def test_is_null(self):
self.assertTrue(fields.is_null(None))
self.assertTrue(fields.is_null(""))
self.assertTrue(fields.is_null(" \t "))
self.assertTrue(fields.is_null("null"))
self.assertTrue(fields.is_null("nil"))
self.assertTrue(fields.is_null("none"))
self.assertTrue(fields.is_null("-"))
self.assertFalse(fields.is_null("Álvaro"))
self.assertFalse(fields.is_null("Álvaro".encode("utf-8")))
def test_as_string(self):
self.assertEqual(fields.as_string(None), "None")
self.assertEqual(fields.as_string(42), "42")
self.assertEqual(fields.as_string(3.141592), "3.141592")
self.assertEqual(fields.as_string("Álvaro"), "Álvaro")
with self.assertRaises(ValueError) as exception_context:
fields.as_string("Álvaro".encode("utf-8"))
self.assertEqual(exception_context.exception.args[0], "Binary is not supported")
def test_get_items(self):
func = fields.get_items(2)
self.assertEqual(func("a b c d e f".split()), ("c",))
func = fields.get_items(0, 2, 3)
self.assertEqual(func("a b c d e f".split()), ("a", "c", "d"))
self.assertEqual(func("a b c".split()), ("a", "c", None))
| lgpl-3.0 |
noba3/KoTos | addons/plugin.video.p2p-streams/resources/core/parsers/arenavision/main.py | 2 | 4389 | # -*- coding: utf-8 -*-
"""
This plugin is 3rd party and not part of p2p-streams addon
Arenavision.in
"""
import sys,os,requests
current_dir = os.path.dirname(os.path.realpath(__file__))
basename = os.path.basename(current_dir)
core_dir = current_dir.replace(basename,'').replace('parsers','')
sys.path.append(core_dir)
from peertopeerutils.webutils import *
from peertopeerutils.pluginxbmc import *
from peertopeerutils.directoryhandle import *
import acestream as ace
import sopcast as sop
from cleaner import *
base_url = "http://www.arenavision.in"
def module_tree(name,url,iconimage,mode,parser,parserfunction):
if not parserfunction: arenavision_menu()
elif parserfunction == "arenavision_streams": arenavision_streams(name,url)
elif parserfunction == "arenavision_schedule": arenavision_schedule(url)
elif parserfunction == "arenavision_chooser": arenavision_chooser(url)
def arenavision_menu():
headers = {
"Cookie" : "beget=begetok; has_js=1;"
}
try:
source = requests.get(base_url,headers=headers).text
except: source="";xbmcgui.Dialog().ok(translate(40000),translate(40128))
if source:
match = re.compile('leaf"><a href="(.+?)">(.+?)</a').findall(source)
for link, nome in match:
if "agenda" in nome.lower():
addDir("[B][COLOR red]Agenda/Schedule[/COLOR][/B]",base_url+link,401,os.path.join(current_dir,"icon.png"),1,True,parser="arenavision",parserfunction="arenavision_schedule")
elif "#" in nome.lower() or "av" in nome.lower() or "arenavision" in nome.lower():
addDir(nome,base_url+link,401,os.path.join(current_dir,"icon.png"),1,False,parser="arenavision",parserfunction="arenavision_streams")
else: pass
def arenavision_streams(name,url):
headers = {
"Cookie" : "beget=begetok; has_js=1;"
}
try:
source = requests.get(url,headers=headers).text
except: source="";xbmcgui.Dialog().ok(translate(40000),translate(40128))
if source:
match = re.compile('sop://(.+?)"').findall(source)
if match: sop.sopstreams(name,os.path.join(current_dir,"icon.png"),"sop://" + match[0])
else:
match = re.compile('this.loadPlayer\("(.+?)"').findall(source)
if match: ace.acestreams(name,os.path.join(current_dir,"icon.png"),match[0])
else: xbmcgui.Dialog().ok(translate(40000),translate(40022))
def arenavision_schedule(url):
headers = {
"Cookie" : "beget=begetok; has_js=1;"
}
try:
source = requests.get(url,headers=headers).text
except: source="";xbmcgui.Dialog().ok(translate(40000),translate(40128))
if source:
match = re.findall('Bruselas(.*?)</footer>', source, re.DOTALL)
for event in match:
eventmatch = re.compile('(\d+)/(\d+)/(\d+) (.+?):(.+?) CET (.+?)<').findall(event)
for dia,mes,year,hour,minute,evento in eventmatch:
try:
import datetime
from peertopeerutils import pytzimp
d = pytzimp.timezone(str(pytzimp.timezone('Europe/Madrid'))).localize(datetime.datetime(2000 + int(year), int(mes), int(dia), hour=int(hour), minute=int(minute)))
timezona= settings.getSetting('timezone_new')
my_location=pytzimp.timezone(pytzimp.all_timezones[int(timezona)])
convertido=d.astimezone(my_location)
fmt = "%d-%m-%y %H:%M"
time=convertido.strftime(fmt)
except:
time='N/A'
event_array = evento.split('/')
event_name = ''
event_channels = []
for parc in event_array:
if 'AV' in parc:
event_channels.append(parc)
try: addDir('[B][COLOR red]' + time + '[/B][/COLOR] ' + removeNonAscii(clean(event_array[0])),str(event_channels),401,os.path.join(current_dir,"icon.png"),1,False,parser="arenavision",parserfunction="arenavision_chooser")
except:pass
def arenavision_chooser(url):
dictionary = eval(url)
index = xbmcgui.Dialog().select("On...", dictionary)
if index > -1:
headers = {
"Cookie" : "beget=begetok; has_js=1;"
}
try:
source = requests.get(base_url,headers=headers).text
except: source="";xbmcgui.Dialog().ok(translate(40000),translate(40128))
if source:
match = re.compile('leaf"><a href="(.+?)">(.+?)</a').findall(source)
for link,name in match:
if (dictionary[index] == name) or (dictionary[index] == name.replace('AV','#')) or (dictionary[index] == name.replace('#','AV')) or (dictionary[index] == name.replace('ArenaVision ','AV')):
arenavision_streams(name,base_url+link)
def removeNonAscii(s): return "".join(filter(lambda x: ord(x)<128, s))
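# e.g. removeNonAscii(u"Álvaro") -> "lvaro" (code points >= 128 are dropped)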
| gpl-2.0 |
playm2mboy/edx-platform | lms/envs/dev.py | 3 | 11326 | """
This config file runs the simplest dev environment using sqlite, and db-based
sessions. Assumes structure:
/envroot/
/db # This is where it'll write the database file
/edx-platform # The location of this repo
/log # Where we're going to write log files
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
# Pylint gets confused by path.py instances, which report themselves as class
# objects. As a result, pylint applies the wrong regex in validating names,
# and throws spurious errors. Therefore, we disable invalid-name checking.
# pylint: disable=invalid-name
from .common import *
DEBUG = True
TEMPLATE_DEBUG = True
HTTPS = 'off'
FEATURES['DISABLE_START_DATES'] = False
FEATURES['ENABLE_SQL_TRACKING_LOGS'] = True
FEATURES['SUBDOMAIN_COURSE_LISTINGS'] = False # Enable to test subdomains--otherwise, want all courses to show up
FEATURES['SUBDOMAIN_BRANDING'] = True
FEATURES['FORCE_UNIVERSITY_DOMAIN'] = None # show all university courses if in dev (ie don't use HTTP_HOST)
FEATURES['ENABLE_MANUAL_GIT_RELOAD'] = True
FEATURES['ENABLE_PSYCHOMETRICS'] = False # real-time psychometrics (eg item response theory analysis in instructor dashboard)
FEATURES['ENABLE_SERVICE_STATUS'] = True
FEATURES['ENABLE_INSTRUCTOR_EMAIL'] = True # Enable email for all Studio courses
FEATURES['REQUIRE_COURSE_EMAIL_AUTH'] = False # Give all courses email (don't require django-admin perms)
FEATURES['ENABLE_HINTER_INSTRUCTOR_VIEW'] = True
FEATURES['ENABLE_INSTRUCTOR_LEGACY_DASHBOARD'] = True
FEATURES['MULTIPLE_ENROLLMENT_ROLES'] = True
FEATURES['ENABLE_SHOPPING_CART'] = True
FEATURES['AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'] = True
FEATURES['ENABLE_S3_GRADE_DOWNLOADS'] = True
FEATURES['IS_EDX_DOMAIN'] = True # Is this an edX-owned domain? (used on instructor dashboard)
FEATURES['ENABLE_PAYMENT_FAKE'] = True
FEEDBACK_SUBMISSION_EMAIL = "dummy@example.com"
WIKI_ENABLED = True
DJFS = {
'type': 'osfs',
'directory_root': 'lms/static/djpyfs',
'url_root': '/static/djpyfs'
}
# If there is a database called 'read_replica', you can use the use_read_replica_if_available
# function in util/query.py, which is useful for very large database reads
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ENV_ROOT / "db" / "edx.db",
}
}
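# Hypothetical read replica entry (commented out): uncomment to exercise
# util.query.use_read_replica_if_available locally. Only the 'read_replica'
# alias matters to that helper; the file name below is just an example.
# DATABASES['read_replica'] = {
#     'ENGINE': 'django.db.backends.sqlite3',
#     'NAME': ENV_ROOT / "db" / "edx_replica.db",
# }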
CACHES = {
# This is the cache used for most things.
# In staging/prod envs, the sessions also live here.
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_loc_mem_cache',
'KEY_FUNCTION': 'util.memcache.safe_key',
},
# The general cache is what you get if you use our util.cache. It's used for
# things like caching the course.xml file for different A/B test groups.
# We set it to be a DummyCache to force reloading of course.xml in dev.
# In staging environments, we would grab VERSION from data uploaded by the
# push process.
'general': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
'KEY_PREFIX': 'general',
'VERSION': 4,
'KEY_FUNCTION': 'util.memcache.safe_key',
},
'mongo_metadata_inheritance': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': '/var/tmp/mongo_metadata_inheritance',
'TIMEOUT': 300,
'KEY_FUNCTION': 'util.memcache.safe_key',
},
'loc_cache': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_location_mem_cache',
},
'course_structure_cache': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_course_structure_mem_cache',
},
}
XQUEUE_INTERFACE = {
"url": "https://sandbox-xqueue.edx.org",
"django_auth": {
"username": "lms",
"password": "***REMOVED***"
},
"basic_auth": ('anant', 'agarwal'),
}
# Make the keyedcache startup warnings go away
CACHE_TIMEOUT = 0
# Dummy secret key for dev
SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
COURSE_LISTINGS = {
'default': ['BerkeleyX/CS169.1x/2012_Fall',
'BerkeleyX/CS188.1x/2012_Fall',
'HarvardX/CS50x/2012',
'HarvardX/PH207x/2012_Fall',
'MITx/3.091x/2012_Fall',
'MITx/6.002x/2012_Fall',
'MITx/6.00x/2012_Fall'],
'berkeley': ['BerkeleyX/CS169/fa12',
'BerkeleyX/CS188/fa12'],
'harvard': ['HarvardX/CS50x/2012H'],
'mit': ['MITx/3.091/MIT_2012_Fall'],
'sjsu': ['MITx/6.002x-EE98/2012_Fall_SJSU'],
}
SUBDOMAIN_BRANDING = {
'sjsu': 'MITx',
'mit': 'MITx',
'berkeley': 'BerkeleyX',
'harvard': 'HarvardX',
'openedx': 'openedx',
'edge': 'edge',
}
# List of `university` landing pages to display, even though they may not
# have an actual course with that org set
VIRTUAL_UNIVERSITIES = []
# Organization that contain other organizations
META_UNIVERSITIES = {'UTx': ['UTAustinX']}
COMMENTS_SERVICE_KEY = "PUT_YOUR_API_KEY_HERE"
############################## Course static files ##########################
if os.path.isdir(DATA_DIR):
# Add the full course repo if there is no static directory
STATICFILES_DIRS += [
# TODO (cpennington): When courses are stored in a database, this
# should no longer be added to STATICFILES
(course_dir, DATA_DIR / course_dir)
for course_dir in os.listdir(DATA_DIR)
if (os.path.isdir(DATA_DIR / course_dir) and
not os.path.isdir(DATA_DIR / course_dir / 'static'))
]
# Otherwise, add only the static directory from the course dir
STATICFILES_DIRS += [
# TODO (cpennington): When courses are stored in a database, this
# should no longer be added to STATICFILES
(course_dir, DATA_DIR / course_dir / 'static')
for course_dir in os.listdir(DATA_DIR)
if (os.path.isdir(DATA_DIR / course_dir / 'static'))
]
################################# edx-platform revision string #####################
EDX_PLATFORM_VERSION_STRING = os.popen('cd %s; git describe' % REPO_ROOT).read().strip()
############################ Open ended grading config #####################
OPEN_ENDED_GRADING_INTERFACE = {
'url': 'http://127.0.0.1:3033/',
'username': 'lms',
'password': 'abcd',
'staff_grading': 'staff_grading',
'peer_grading': 'peer_grading',
'grading_controller': 'grading_controller'
}
############################## LMS Migration ##################################
FEATURES['ENABLE_LMS_MIGRATION'] = True
FEATURES['ACCESS_REQUIRE_STAFF_FOR_COURSE'] = False # require that user be in the staff_* group to be able to enroll
FEATURES['XQA_SERVER'] = 'http://xqa:server@content-qa.edX.mit.edu/xqa'
INSTALLED_APPS += ('lms_migration',)
LMS_MIGRATION_ALLOWED_IPS = ['127.0.0.1']
################################ OpenID Auth #################################
FEATURES['AUTH_USE_OPENID'] = True
FEATURES['AUTH_USE_OPENID_PROVIDER'] = True
FEATURES['BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'] = True
INSTALLED_APPS += ('external_auth',)
INSTALLED_APPS += ('django_openid_auth',)
OPENID_CREATE_USERS = False
OPENID_UPDATE_DETAILS_FROM_SREG = True
OPENID_SSO_SERVER_URL = 'https://www.google.com/accounts/o8/id' # TODO: accept more endpoints
OPENID_USE_AS_ADMIN_LOGIN = False
OPENID_PROVIDER_TRUSTED_ROOTS = ['*']
############################## OAUTH2 Provider ################################
FEATURES['ENABLE_OAUTH2_PROVIDER'] = True
######################## MIT Certificates SSL Auth ############################
FEATURES['AUTH_USE_CERTIFICATES'] = False
########################### External REST APIs #################################
FEATURES['ENABLE_MOBILE_REST_API'] = True
FEATURES['ENABLE_VIDEO_ABSTRACTION_LAYER_API'] = True
################################# CELERY ######################################
# By default don't use a worker, execute tasks as if they were local functions
CELERY_ALWAYS_EAGER = True
################################ DEBUG TOOLBAR ################################
INSTALLED_APPS += ('debug_toolbar', 'djpyfs',)
MIDDLEWARE_CLASSES += (
'django_comment_client.utils.QueryCountDebugMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.profiling.ProfilingPanel',
)
#################### FILE UPLOADS (for discussion forums) #####################
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
MEDIA_ROOT = ENV_ROOT / "uploads"
MEDIA_URL = "/static/uploads/"
STATICFILES_DIRS.append(("uploads", MEDIA_ROOT))
FILE_UPLOAD_TEMP_DIR = ENV_ROOT / "uploads"
FILE_UPLOAD_HANDLERS = (
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)
FEATURES['AUTH_USE_SHIB'] = True
FEATURES['RESTRICT_ENROLL_BY_REG_METHOD'] = True
########################### PIPELINE #################################
PIPELINE_SASS_ARGUMENTS = '--debug-info --require {proj_dir}/static/sass/bourbon/lib/bourbon.rb'.format(proj_dir=PROJECT_ROOT)
########################## ANALYTICS TESTING ########################
ANALYTICS_SERVER_URL = "http://127.0.0.1:9000/"
ANALYTICS_API_KEY = ""
##### Segment ######
# If there's an environment variable set, grab it
SEGMENT_KEY = os.environ.get('SEGMENT_KEY')
###################### Payment ######################
CC_PROCESSOR['CyberSource']['SHARED_SECRET'] = os.environ.get('CYBERSOURCE_SHARED_SECRET', '')
CC_PROCESSOR['CyberSource']['MERCHANT_ID'] = os.environ.get('CYBERSOURCE_MERCHANT_ID', '')
CC_PROCESSOR['CyberSource']['SERIAL_NUMBER'] = os.environ.get('CYBERSOURCE_SERIAL_NUMBER', '')
CC_PROCESSOR['CyberSource']['PURCHASE_ENDPOINT'] = '/shoppingcart/payment_fake/'
CC_PROCESSOR['CyberSource2']['SECRET_KEY'] = os.environ.get('CYBERSOURCE_SECRET_KEY', '')
CC_PROCESSOR['CyberSource2']['ACCESS_KEY'] = os.environ.get('CYBERSOURCE_ACCESS_KEY', '')
CC_PROCESSOR['CyberSource2']['PROFILE_ID'] = os.environ.get('CYBERSOURCE_PROFILE_ID', '')
CC_PROCESSOR['CyberSource2']['PURCHASE_ENDPOINT'] = '/shoppingcart/payment_fake/'
########################## USER API ##########################
EDX_API_KEY = None
### This enables the Metrics tab for the Instructor dashboard ###########
FEATURES['CLASS_DASHBOARD'] = True
### This settings is for the course registration code length ############
REGISTRATION_CODE_LENGTH = 8
#####################################################################
# Lastly, see if the developer has any local overrides.
try:
from .private import * # pylint: disable=import-error
except ImportError:
pass
| agpl-3.0 |
evro/CouchPotatoServer | libs/bs4/builder/_htmlparser.py | 412 | 8839 | """Use the HTMLParser library to parse HTML files that aren't too bad."""
__all__ = [
'HTMLParserTreeBuilder',
]
from HTMLParser import (
HTMLParser,
HTMLParseError,
)
import sys
import warnings
# Starting in Python 3.2, the HTMLParser constructor takes a 'strict'
# argument, which we'd like to set to False. Unfortunately,
# http://bugs.python.org/issue13273 makes strict=True a better bet
# before Python 3.2.3.
#
# At the end of this file, we monkeypatch HTMLParser so that
# strict=True works well on Python 3.2.2.
major, minor, release = sys.version_info[:3]
CONSTRUCTOR_TAKES_STRICT = (
major > 3
or (major == 3 and minor > 2)
or (major == 3 and minor == 2 and release >= 3))
from bs4.element import (
CData,
Comment,
Declaration,
Doctype,
ProcessingInstruction,
)
from bs4.dammit import EntitySubstitution, UnicodeDammit
from bs4.builder import (
HTML,
HTMLTreeBuilder,
STRICT,
)
HTMLPARSER = 'html.parser'
class BeautifulSoupHTMLParser(HTMLParser):
def handle_starttag(self, name, attrs):
# XXX namespace
attr_dict = {}
for key, value in attrs:
# Change None attribute values to the empty string
# for consistency with the other tree builders.
if value is None:
value = ''
attr_dict[key] = value
attrvalue = '""'
self.soup.handle_starttag(name, None, None, attr_dict)
def handle_endtag(self, name):
self.soup.handle_endtag(name)
def handle_data(self, data):
self.soup.handle_data(data)
def handle_charref(self, name):
# XXX workaround for a bug in HTMLParser. Remove this once
# it's fixed.
if name.startswith('x'):
real_name = int(name.lstrip('x'), 16)
elif name.startswith('X'):
real_name = int(name.lstrip('X'), 16)
else:
real_name = int(name)
try:
data = unichr(real_name)
except (ValueError, OverflowError), e:
data = u"\N{REPLACEMENT CHARACTER}"
self.handle_data(data)
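    # For example: "&#x41;" in the markup reaches us as
    # handle_charref("x41"), which decodes int("41", 16) == 65 to u"A".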
def handle_entityref(self, name):
character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)
if character is not None:
data = character
else:
data = "&%s;" % name
self.handle_data(data)
def handle_comment(self, data):
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(Comment)
def handle_decl(self, data):
self.soup.endData()
if data.startswith("DOCTYPE "):
data = data[len("DOCTYPE "):]
elif data == 'DOCTYPE':
# i.e. "<!DOCTYPE>"
data = ''
self.soup.handle_data(data)
self.soup.endData(Doctype)
def unknown_decl(self, data):
if data.upper().startswith('CDATA['):
cls = CData
data = data[len('CDATA['):]
else:
cls = Declaration
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(cls)
def handle_pi(self, data):
self.soup.endData()
if data.endswith("?") and data.lower().startswith("xml"):
# "An XHTML processing instruction using the trailing '?'
# will cause the '?' to be included in data." - HTMLParser
# docs.
#
# Strip the question mark so we don't end up with two
# question marks.
data = data[:-1]
self.soup.handle_data(data)
self.soup.endData(ProcessingInstruction)
class HTMLParserTreeBuilder(HTMLTreeBuilder):
is_xml = False
features = [HTML, STRICT, HTMLPARSER]
def __init__(self, *args, **kwargs):
if CONSTRUCTOR_TAKES_STRICT:
kwargs['strict'] = False
self.parser_args = (args, kwargs)
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
"""
:return: A 4-tuple (markup, original encoding, encoding
declared within markup, whether any characters had to be
replaced with REPLACEMENT CHARACTER).
"""
if isinstance(markup, unicode):
yield (markup, None, None, False)
return
try_encodings = [user_specified_encoding, document_declared_encoding]
dammit = UnicodeDammit(markup, try_encodings, is_html=True)
yield (dammit.markup, dammit.original_encoding,
dammit.declared_html_encoding,
dammit.contains_replacement_characters)
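    # A rough illustration: a UTF-8 bytestring such as b"<p>caf\xc3\xa9</p>"
    # yields a single (unicode_markup, "utf-8", declared_encoding_or_None,
    # False) tuple via UnicodeDammit, while unicode input short-circuits to
    # (markup, None, None, False).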
def feed(self, markup):
args, kwargs = self.parser_args
parser = BeautifulSoupHTMLParser(*args, **kwargs)
parser.soup = self.soup
try:
parser.feed(markup)
except HTMLParseError, e:
warnings.warn(RuntimeWarning(
"Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help."))
raise e
# Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some
# 3.2.3 code. This ensures they don't treat markup like <p></p> as a
# string.
#
# XXX This code can be removed once most Python 3 users are on 3.2.3.
if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT:
import re
attrfind_tolerant = re.compile(
r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?')
HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend
from html.parser import tagfind, attrfind
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = rawdata[i+1:k].lower()
while k < endpos:
if self.strict:
m = attrfind.match(rawdata, k)
else:
m = attrfind_tolerant.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
if self.strict:
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
self.handle_data(rawdata[i:endpos])
return endpos
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag)
return endpos
def set_cdata_mode(self, elem):
self.cdata_elem = elem.lower()
self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
BeautifulSoupHTMLParser.parse_starttag = parse_starttag
BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode
CONSTRUCTOR_TAKES_STRICT = True
| gpl-3.0 |
jayme-github/CouchPotatoServer | libs/sqlalchemy/orm/descriptor_props.py | 17 | 15476 | # orm/descriptor_props.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Descriptor properties are more "auxiliary" properties
that exist as configurational elements, but don't participate
as actively in the load/persist ORM loop.
"""
from sqlalchemy.orm.interfaces import \
MapperProperty, PropComparator, StrategizedProperty
from sqlalchemy.orm.mapper import _none_set
from sqlalchemy.orm import attributes, strategies
from sqlalchemy import util, sql, exc as sa_exc, event, schema
from sqlalchemy.sql import expression
properties = util.importlater('sqlalchemy.orm', 'properties')
class DescriptorProperty(MapperProperty):
""":class:`.MapperProperty` which proxies access to a
user-defined descriptor."""
doc = None
def instrument_class(self, mapper):
prop = self
class _ProxyImpl(object):
accepts_scalar_loader = False
expire_missing = True
def __init__(self, key):
self.key = key
if hasattr(prop, 'get_history'):
def get_history(self, state, dict_,
passive=attributes.PASSIVE_OFF):
return prop.get_history(state, dict_, passive)
if self.descriptor is None:
desc = getattr(mapper.class_, self.key, None)
if mapper._is_userland_descriptor(desc):
self.descriptor = desc
if self.descriptor is None:
def fset(obj, value):
setattr(obj, self.name, value)
def fdel(obj):
delattr(obj, self.name)
def fget(obj):
return getattr(obj, self.name)
self.descriptor = property(
fget=fget,
fset=fset,
fdel=fdel,
)
proxy_attr = attributes.\
create_proxied_attribute(self.descriptor)\
(
self.parent.class_,
self.key,
self.descriptor,
lambda: self._comparator_factory(mapper),
doc=self.doc
)
proxy_attr.impl = _ProxyImpl(self.key)
mapper.class_manager.instrument_attribute(self.key, proxy_attr)
class CompositeProperty(DescriptorProperty):
def __init__(self, class_, *attrs, **kwargs):
self.attrs = attrs
self.composite_class = class_
self.active_history = kwargs.get('active_history', False)
self.deferred = kwargs.get('deferred', False)
self.group = kwargs.get('group', None)
self.comparator_factory = kwargs.pop('comparator_factory',
self.__class__.Comparator)
util.set_creation_order(self)
self._create_descriptor()
def instrument_class(self, mapper):
super(CompositeProperty, self).instrument_class(mapper)
self._setup_event_handlers()
def do_init(self):
"""Initialization which occurs after the :class:`.CompositeProperty`
has been associated with its parent mapper.
"""
self._init_props()
self._setup_arguments_on_columns()
def _create_descriptor(self):
"""Create the Python descriptor that will serve as
the access point on instances of the mapped class.
"""
def fget(instance):
dict_ = attributes.instance_dict(instance)
state = attributes.instance_state(instance)
if self.key not in dict_:
# key not present. Iterate through related
# attributes, retrieve their values. This
# ensures they all load.
values = [getattr(instance, key) for key in self._attribute_keys]
# current expected behavior here is that the composite is
# created on access if the object is persistent or if
# col attributes have non-None values. This would be better
# if the composite were created unconditionally,
# but that would be a behavioral change.
if self.key not in dict_ and (
state.key is not None or
not _none_set.issuperset(values)
):
dict_[self.key] = self.composite_class(*values)
state.manager.dispatch.refresh(state, None, [self.key])
return dict_.get(self.key, None)
def fset(instance, value):
dict_ = attributes.instance_dict(instance)
state = attributes.instance_state(instance)
attr = state.manager[self.key]
previous = dict_.get(self.key, attributes.NO_VALUE)
for fn in attr.dispatch.set:
value = fn(state, value, previous, attr.impl)
dict_[self.key] = value
if value is None:
for key in self._attribute_keys:
setattr(instance, key, None)
else:
for key, value in zip(
self._attribute_keys,
value.__composite_values__()):
setattr(instance, key, value)
def fdel(instance):
state = attributes.instance_state(instance)
dict_ = attributes.instance_dict(instance)
previous = dict_.pop(self.key, attributes.NO_VALUE)
attr = state.manager[self.key]
attr.dispatch.remove(state, previous, attr.impl)
for key in self._attribute_keys:
setattr(instance, key, None)
self.descriptor = property(fget, fset, fdel)
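# A hedged sketch (the Point class is hypothetical, not part of this
# module): composite_class is expected to accept its column values
# positionally and to expose __composite_values__(), e.g.
#
#   class Point(object):
#       def __init__(self, x, y):
#           self.x, self.y = x, y
#       def __composite_values__(self):
#           return self.x, self.y
#
# so fget() above can build composite_class(*values) and fset() can fan a
# Point back out to the underlying mapped columns.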
@util.memoized_property
def _comparable_elements(self):
return [
getattr(self.parent.class_, prop.key)
for prop in self.props
]
def _init_props(self):
self.props = props = []
for attr in self.attrs:
if isinstance(attr, basestring):
prop = self.parent.get_property(attr)
elif isinstance(attr, schema.Column):
prop = self.parent._columntoproperty[attr]
elif isinstance(attr, attributes.InstrumentedAttribute):
prop = attr.property
props.append(prop)
@property
def columns(self):
return [a for a in self.attrs if isinstance(a, schema.Column)]
def _setup_arguments_on_columns(self):
"""Propagate configuration arguments made on this composite
to the target columns, for those that apply.
"""
for prop in self.props:
prop.active_history = self.active_history
if self.deferred:
prop.deferred = self.deferred
prop.strategy_class = strategies.DeferredColumnLoader
prop.group = self.group
def _setup_event_handlers(self):
"""Establish events that populate/expire the composite attribute."""
def load_handler(state, *args):
dict_ = state.dict
if self.key in dict_:
return
# if column elements aren't loaded, skip.
# __get__() will initiate a load for those
# columns
for k in self._attribute_keys:
if k not in dict_:
return
#assert self.key not in dict_
dict_[self.key] = self.composite_class(
*[state.dict[key] for key in
self._attribute_keys]
)
def expire_handler(state, keys):
if keys is None or set(self._attribute_keys).intersection(keys):
state.dict.pop(self.key, None)
def insert_update_handler(mapper, connection, state):
"""After an insert or update, some columns may be expired due
to server side defaults, or re-populated due to client side
defaults. Pop out the composite value here so that it
is recreated on the next access.
"""
state.dict.pop(self.key, None)
event.listen(self.parent, 'after_insert',
insert_update_handler, raw=True)
event.listen(self.parent, 'after_update',
insert_update_handler, raw=True)
event.listen(self.parent, 'load', load_handler, raw=True, propagate=True)
event.listen(self.parent, 'refresh', load_handler, raw=True, propagate=True)
event.listen(self.parent, "expire", expire_handler, raw=True, propagate=True)
# TODO: need a deserialize hook here
@util.memoized_property
def _attribute_keys(self):
return [
prop.key for prop in self.props
]
def get_history(self, state, dict_, passive=attributes.PASSIVE_OFF):
"""Provided for userland code that uses attributes.get_history()."""
added = []
deleted = []
has_history = False
for prop in self.props:
key = prop.key
hist = state.manager[key].impl.get_history(state, dict_)
if hist.has_changes():
has_history = True
non_deleted = hist.non_deleted()
if non_deleted:
added.extend(non_deleted)
else:
added.append(None)
if hist.deleted:
deleted.extend(hist.deleted)
else:
deleted.append(None)
if has_history:
return attributes.History(
[self.composite_class(*added)],
(),
[self.composite_class(*deleted)]
)
else:
return attributes.History(
(),[self.composite_class(*added)], ()
)
def _comparator_factory(self, mapper):
return self.comparator_factory(self)
class Comparator(PropComparator):
def __init__(self, prop, adapter=None):
self.prop = self.property = prop
self.adapter = adapter
def __clause_element__(self):
if self.adapter:
# TODO: test coverage for adapted composite comparison
return expression.ClauseList(
*[self.adapter(x) for x in self.prop._comparable_elements])
else:
return expression.ClauseList(*self.prop._comparable_elements)
__hash__ = None
def __eq__(self, other):
if other is None:
values = [None] * len(self.prop._comparable_elements)
else:
values = other.__composite_values__()
return sql.and_(
*[a==b for a, b in zip(self.prop._comparable_elements, values)])
def __ne__(self, other):
return sql.not_(self.__eq__(other))
def __str__(self):
return str(self.parent.class_.__name__) + "." + self.key
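# A hedged illustration (the Vertex/Point names are hypothetical): with a
# composite 'start' attribute mapped over columns (x1, y1), the Comparator
# above lets a query such as
#
#   session.query(Vertex).filter(Vertex.start == Point(3, 4))
#
# expand through __eq__() into SQL of the form "x1 = :param_1 AND y1 = :param_2".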
class ConcreteInheritedProperty(DescriptorProperty):
"""A 'do nothing' :class:`.MapperProperty` that disables
an attribute on a concrete subclass that is only present
on the inherited mapper, not the concrete classes' mapper.
Cases where this occurs include:
* When the superclass mapper is mapped against a
"polymorphic union", which includes all attributes from
all subclasses.
* When a relationship() is configured on an inherited mapper,
but not on the subclass mapper. Concrete mappers require
that relationship() is configured explicitly on each
subclass.
"""
def _comparator_factory(self, mapper):
comparator_callable = None
for m in self.parent.iterate_to_root():
p = m._props[self.key]
if not isinstance(p, ConcreteInheritedProperty):
comparator_callable = p.comparator_factory
break
return comparator_callable
def __init__(self):
def warn():
raise AttributeError("Concrete %s does not implement "
"attribute %r at the instance level. Add this "
"property explicitly to %s." %
(self.parent, self.key, self.parent))
class NoninheritedConcreteProp(object):
def __set__(s, obj, value):
warn()
def __delete__(s, obj):
warn()
def __get__(s, obj, owner):
if obj is None:
return self.descriptor
warn()
self.descriptor = NoninheritedConcreteProp()
class SynonymProperty(DescriptorProperty):
def __init__(self, name, map_column=None,
descriptor=None, comparator_factory=None,
doc=None):
self.name = name
self.map_column = map_column
self.descriptor = descriptor
self.comparator_factory = comparator_factory
self.doc = doc or (descriptor and descriptor.__doc__) or None
util.set_creation_order(self)
# TODO: when initialized, check _proxied_property,
# emit a warning if its not a column-based property
@util.memoized_property
def _proxied_property(self):
return getattr(self.parent.class_, self.name).property
def _comparator_factory(self, mapper):
prop = self._proxied_property
if self.comparator_factory:
comp = self.comparator_factory(prop, mapper)
else:
comp = prop.comparator_factory(prop, mapper)
return comp
def set_parent(self, parent, init):
if self.map_column:
# implement the 'map_column' option.
if self.key not in parent.mapped_table.c:
raise sa_exc.ArgumentError(
"Can't compile synonym '%s': no column on table "
"'%s' named '%s'"
% (self.name, parent.mapped_table.description, self.key))
elif parent.mapped_table.c[self.key] in \
parent._columntoproperty and \
parent._columntoproperty[
parent.mapped_table.c[self.key]
].key == self.name:
raise sa_exc.ArgumentError(
"Can't call map_column=True for synonym %r=%r, "
"a ColumnProperty already exists keyed to the name "
"%r for column %r" %
(self.key, self.name, self.name, self.key)
)
p = properties.ColumnProperty(parent.mapped_table.c[self.key])
parent._configure_property(
self.name, p,
init=init,
setparent=True)
p._mapped_by_synonym = self.key
self.parent = parent
class ComparableProperty(DescriptorProperty):
"""Instruments a Python property for use in query expressions."""
def __init__(self, comparator_factory, descriptor=None, doc=None):
self.descriptor = descriptor
self.comparator_factory = comparator_factory
self.doc = doc or (descriptor and descriptor.__doc__) or None
util.set_creation_order(self)
def _comparator_factory(self, mapper):
return self.comparator_factory(self, mapper)
| gpl-3.0 |
ihsanudin/odoo | addons/resource/tests/test_resource.py | 243 | 32181 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp.addons.resource.tests.common import TestResourceCommon
class TestResource(TestResourceCommon):
def test_00_intervals(self):
intervals = [
(
datetime.strptime('2013-02-04 09:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-04 11:00:00', '%Y-%m-%d %H:%M:%S')
), (
datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-04 12:00:00', '%Y-%m-%d %H:%M:%S')
), (
datetime.strptime('2013-02-04 11:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S')
), (
datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-04 21:00:00', '%Y-%m-%d %H:%M:%S')
), (
datetime.strptime('2013-02-03 08:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-03 10:00:00', '%Y-%m-%d %H:%M:%S')
), (
datetime.strptime('2013-02-04 18:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-04 19:00:00', '%Y-%m-%d %H:%M:%S')
)
]
# Test: interval cleaning
cleaned_intervals = self.resource_calendar.interval_clean(intervals)
self.assertEqual(len(cleaned_intervals), 3, 'resource_calendar: wrong interval cleaning')
# First interval: 03, unchanged
self.assertEqual(cleaned_intervals[0][0], datetime.strptime('2013-02-03 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
self.assertEqual(cleaned_intervals[0][1], datetime.strptime('2013-02-03 10:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
# Second interval: 04, 08-14, combining 08-12 and 11-14, 09-11 being inside 08-12
self.assertEqual(cleaned_intervals[1][0], datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
self.assertEqual(cleaned_intervals[1][1], datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
# Third interval: 04, 17-21, 18-19 being inside 17-21
self.assertEqual(cleaned_intervals[2][0], datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
self.assertEqual(cleaned_intervals[2][1], datetime.strptime('2013-02-04 21:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
# Test: disjoint removal
working_interval = (datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'), datetime.strptime('2013-02-04 18:00:00', '%Y-%m-%d %H:%M:%S'))
result = self.resource_calendar.interval_remove_leaves(working_interval, intervals)
self.assertEqual(len(result), 1, 'resource_calendar: wrong leave removal from interval')
# First interval: 04, 14-17
self.assertEqual(result[0][0], datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
self.assertEqual(result[0][1], datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
# Test: schedule hours on intervals
result = self.resource_calendar.interval_schedule_hours(cleaned_intervals, 5.5)
self.assertEqual(len(result), 2, 'resource_calendar: wrong hours scheduling in interval')
# First interval: 03, 08-10 untouched
self.assertEqual(result[0][0], datetime.strptime('2013-02-03 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
self.assertEqual(result[0][1], datetime.strptime('2013-02-03 10:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
# Second interval: 04, 08-11:30
self.assertEqual(result[1][0], datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
self.assertEqual(result[1][1], datetime.strptime('2013-02-04 11:30:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
# Test: schedule hours on intervals, backwards
cleaned_intervals.reverse()
result = self.resource_calendar.interval_schedule_hours(cleaned_intervals, 5.5, remove_at_end=False)
self.assertEqual(len(result), 2, 'resource_calendar: wrong hours scheduling in interval')
# First interval (backwards): 04, 17-21
self.assertEqual(result[0][0], datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
self.assertEqual(result[0][1], datetime.strptime('2013-02-04 21:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
# Second interval (backwards): 04, 12:30-14:00
self.assertEqual(result[1][0], datetime.strptime('2013-02-04 12:30:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
self.assertEqual(result[1][1], datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
def test_10_calendar_basics(self):
""" Testing basic method of resource.calendar """
cr, uid = self.cr, self.uid
# --------------------------------------------------
# Test1: get_next_day
# --------------------------------------------------
# Test: next day: next day after day1 is day4
date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date1.date())
self.assertEqual(date, self.date2.date(), 'resource_calendar: wrong next day computing')
# Test: next day: next day after day4 is (day1+7)
date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date2.date())
self.assertEqual(date, self.date1.date() + relativedelta(days=7), 'resource_calendar: wrong next day computing')
# Test: next day: next day after day4+1 is (day1+7)
date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date2.date() + relativedelta(days=1))
self.assertEqual(date, self.date1.date() + relativedelta(days=7), 'resource_calendar: wrong next day computing')
# Test: next day: next day after day1-1 is day1
date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date1.date() + relativedelta(days=-1))
self.assertEqual(date, self.date1.date(), 'resource_calendar: wrong next day computing')
# --------------------------------------------------
# Test2: get_previous_day
# --------------------------------------------------
# Test: previous day: previous day before day1 is (day4-7)
date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date1.date())
self.assertEqual(date, self.date2.date() + relativedelta(days=-7), 'resource_calendar: wrong previous day computing')
# Test: previous day: previous day before day4 is day1
date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date2.date())
self.assertEqual(date, self.date1.date(), 'resource_calendar: wrong previous day computing')
# Test: previous day: previous day before day4+1 is day4
date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date2.date() + relativedelta(days=1))
self.assertEqual(date, self.date2.date(), 'resource_calendar: wrong previous day computing')
# Test: previous day: previous day before day1-1 is (day4-7)
date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date1.date() + relativedelta(days=-1))
self.assertEqual(date, self.date2.date() + relativedelta(days=-7), 'resource_calendar: wrong previous day computing')
# --------------------------------------------------
# Test3: misc
# --------------------------------------------------
weekdays = self.resource_calendar.get_weekdays(cr, uid, self.calendar_id)
self.assertEqual(weekdays, [1, 4], 'resource_calendar: wrong weekdays computing')
attendances = self.resource_calendar.get_attendances_for_weekdays(cr, uid, self.calendar_id, [2, 3, 4, 5])
self.assertEqual(set([att.id for att in attendances]), set([self.att2_id, self.att3_id]),
'resource_calendar: wrong attendances filtering by weekdays computing')
def test_20_calendar_working_intervals(self):
""" Testing working intervals computing method of resource.calendar """
cr, uid = self.cr, self.uid
_format = '%Y-%m-%d %H:%M:%S'
# Test: day0 without leaves: 1 interval
intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date1)
self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-12 09:08:07', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong working intervals')
# Test: day3 without leaves: 2 intervals
intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date2)
self.assertEqual(len(intervals), 2, 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-15 10:11:12', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-15 13:00:00', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[1][0], datetime.strptime('2013-02-15 16:00:00', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[1][1], datetime.strptime('2013-02-15 23:00:00', _format), 'resource_calendar: wrong working intervals')
# Test: day0 with leaves outside range: 1 interval
intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=0), compute_leaves=True)
self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-12 08:00:00', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong working intervals')
# Test: day0 with leaves: 2 intervals because of leave between 9 and 12, ending at 15:45:30
intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id,
start_dt=self.date1.replace(hour=8) + relativedelta(days=7),
end_dt=self.date1.replace(hour=15, minute=45, second=30) + relativedelta(days=7),
compute_leaves=True)
self.assertEqual(len(intervals), 2, 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-19 08:08:07', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-19 09:00:00', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[1][0], datetime.strptime('2013-02-19 12:00:00', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[1][1], datetime.strptime('2013-02-19 15:45:30', _format), 'resource_calendar: wrong working intervals')
def test_30_calendar_working_days(self):
""" Testing calendar hours computation on a working day """
cr, uid = self.cr, self.uid
_format = '%Y-%m-%d %H:%M:%S'
# Test: day1, beginning at 10:30 -> work from 10:30 (arrival) until 16:00
intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=10, minute=30, second=0))
self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-12 10:30:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong working interval / day computing')
# Test: hour computation for same interval, should give 5.5
wh = self.resource_calendar.get_working_hours_of_date(cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=10, minute=30, second=0))
self.assertEqual(wh, 5.5, 'resource_calendar: wrong working interval / day time computing')
# Test: day1+7 on leave, without leave computation
intervals = self.resource_calendar.get_working_intervals_of_day(
cr, uid, self.calendar_id,
start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=7)
)
# Result: day1 (08->16)
self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working interval/day computing')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-19 08:00:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-19 16:00:00', _format), 'resource_calendar: wrong working interval / day computing')
# Test: day1+7 on leave, with generic leave computation
intervals = self.resource_calendar.get_working_intervals_of_day(
cr, uid, self.calendar_id,
start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=7),
compute_leaves=True
)
# Result: day1 (08->09 + 12->16)
self.assertEqual(len(intervals), 2, 'resource_calendar: wrong working interval/day computing')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-19 08:00:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-19 09:00:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[1][0], datetime.strptime('2013-02-19 12:00:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[1][1], datetime.strptime('2013-02-19 16:00:00', _format), 'resource_calendar: wrong working interval / day computing')
# Test: day1+14 on leave, with generic leave computation
intervals = self.resource_calendar.get_working_intervals_of_day(
cr, uid, self.calendar_id,
start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=14),
compute_leaves=True
)
# Result: day1 (08->16)
self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working interval/day computing')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-26 08:00:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-26 16:00:00', _format), 'resource_calendar: wrong working interval / day computing')
# Test: day1+14 on leave, with resource leave computation
intervals = self.resource_calendar.get_working_intervals_of_day(
cr, uid, self.calendar_id,
start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=14),
compute_leaves=True,
resource_id=self.resource1_id
)
# Result: nothing, because on leave
self.assertEqual(len(intervals), 0, 'resource_calendar: wrong working interval/day computing')
def test_40_calendar_hours_scheduling(self):
""" Testing calendar hours scheduling """
cr, uid = self.cr, self.uid
_format = '%Y-%m-%d %H:%M:%S'
# --------------------------------------------------
# Test0: schedule hours backwards (old interval_min_get)
# Done without calendar
# --------------------------------------------------
# Done without calendar
# res = self.resource_calendar.interval_min_get(cr, uid, None, self.date1, 40, resource=False)
# res: (datetime.datetime(2013, 2, 7, 9, 8, 7), datetime.datetime(2013, 2, 12, 9, 8, 7))
# --------------------------------------------------
# Test1: schedule hours backwards (old interval_min_get)
# --------------------------------------------------
# res = self.resource_calendar.interval_min_get(cr, uid, self.calendar_id, self.date1, 40, resource=False)
# (datetime.datetime(2013, 1, 29, 9, 0), datetime.datetime(2013, 1, 29, 16, 0))
# (datetime.datetime(2013, 2, 1, 8, 0), datetime.datetime(2013, 2, 1, 13, 0))
# (datetime.datetime(2013, 2, 1, 16, 0), datetime.datetime(2013, 2, 1, 23, 0))
# (datetime.datetime(2013, 2, 5, 8, 0), datetime.datetime(2013, 2, 5, 16, 0))
# (datetime.datetime(2013, 2, 8, 8, 0), datetime.datetime(2013, 2, 8, 13, 0))
# (datetime.datetime(2013, 2, 8, 16, 0), datetime.datetime(2013, 2, 8, 23, 0))
# (datetime.datetime(2013, 2, 12, 8, 0), datetime.datetime(2013, 2, 12, 9, 0))
res = self.resource_calendar.schedule_hours(cr, uid, self.calendar_id, -40, day_dt=self.date1.replace(minute=0, second=0))
# current day, limited at 09:00 because of day_dt specified -> 1 hour
self.assertEqual(res[-1][0], datetime.strptime('2013-02-12 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-1][1], datetime.strptime('2013-02-12 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
# previous days: 5+7 hours / 8 hours / 5+7 hours -> 32 hours
self.assertEqual(res[-2][0], datetime.strptime('2013-02-08 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-2][1], datetime.strptime('2013-02-08 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-3][0], datetime.strptime('2013-02-08 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-3][1], datetime.strptime('2013-02-08 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-4][0], datetime.strptime('2013-02-05 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-4][1], datetime.strptime('2013-02-05 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-5][0], datetime.strptime('2013-02-01 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-5][1], datetime.strptime('2013-02-01 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-6][0], datetime.strptime('2013-02-01 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-6][1], datetime.strptime('2013-02-01 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
# 7 hours remaining
self.assertEqual(res[-7][0], datetime.strptime('2013-01-29 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-7][1], datetime.strptime('2013-01-29 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
# Compute scheduled hours
td = timedelta()
for item in res:
td += item[1] - item[0]
self.assertEqual(seconds(td) / 3600.0, 40.0, 'resource_calendar: wrong hours scheduling')
# --------------------------------------------------
# Test2: schedule hours forward (old interval_get)
# --------------------------------------------------
# res = self.resource_calendar.interval_get(cr, uid, self.calendar_id, self.date1, 40, resource=False, byday=True)
# (datetime.datetime(2013, 2, 12, 9, 0), datetime.datetime(2013, 2, 12, 16, 0))
# (datetime.datetime(2013, 2, 15, 8, 0), datetime.datetime(2013, 2, 15, 13, 0))
# (datetime.datetime(2013, 2, 15, 16, 0), datetime.datetime(2013, 2, 15, 23, 0))
# (datetime.datetime(2013, 2, 22, 8, 0), datetime.datetime(2013, 2, 22, 13, 0))
# (datetime.datetime(2013, 2, 22, 16, 0), datetime.datetime(2013, 2, 22, 23, 0))
# (datetime.datetime(2013, 2, 26, 8, 0), datetime.datetime(2013, 2, 26, 16, 0))
# (datetime.datetime(2013, 3, 1, 8, 0), datetime.datetime(2013, 3, 1, 9, 0))
res = self.resource_calendar.schedule_hours(
cr, uid, self.calendar_id, 40,
day_dt=self.date1.replace(minute=0, second=0)
)
self.assertEqual(res[0][0], datetime.strptime('2013-02-12 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[1][0], datetime.strptime('2013-02-15 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[1][1], datetime.strptime('2013-02-15 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[2][0], datetime.strptime('2013-02-15 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[2][1], datetime.strptime('2013-02-15 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[3][0], datetime.strptime('2013-02-19 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[3][1], datetime.strptime('2013-02-19 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[4][0], datetime.strptime('2013-02-22 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[4][1], datetime.strptime('2013-02-22 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[5][0], datetime.strptime('2013-02-22 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[5][1], datetime.strptime('2013-02-22 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[6][0], datetime.strptime('2013-02-26 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[6][1], datetime.strptime('2013-02-26 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
td = timedelta()
for item in res:
td += item[1] - item[0]
self.assertEqual(seconds(td) / 3600.0, 40.0, 'resource_calendar: wrong hours scheduling')
# res = self.resource_calendar.interval_get(cr, uid, self.calendar_id, self.date1, 40, resource=self.resource1_id, byday=True)
# (datetime.datetime(2013, 2, 12, 9, 0), datetime.datetime(2013, 2, 12, 16, 0))
# (datetime.datetime(2013, 2, 15, 8, 0), datetime.datetime(2013, 2, 15, 13, 0))
# (datetime.datetime(2013, 2, 15, 16, 0), datetime.datetime(2013, 2, 15, 23, 0))
# (datetime.datetime(2013, 3, 1, 8, 0), datetime.datetime(2013, 3, 1, 13, 0))
# (datetime.datetime(2013, 3, 1, 16, 0), datetime.datetime(2013, 3, 1, 23, 0))
# (datetime.datetime(2013, 3, 5, 8, 0), datetime.datetime(2013, 3, 5, 16, 0))
# (datetime.datetime(2013, 3, 8, 8, 0), datetime.datetime(2013, 3, 8, 9, 0))
res = self.resource_calendar.schedule_hours(
cr, uid, self.calendar_id, 40,
day_dt=self.date1.replace(minute=0, second=0),
compute_leaves=True,
resource_id=self.resource1_id
)
self.assertEqual(res[0][0], datetime.strptime('2013-02-12 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[1][0], datetime.strptime('2013-02-15 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[1][1], datetime.strptime('2013-02-15 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[2][0], datetime.strptime('2013-02-15 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[2][1], datetime.strptime('2013-02-15 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[3][0], datetime.strptime('2013-02-19 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[3][1], datetime.strptime('2013-02-19 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[4][0], datetime.strptime('2013-02-19 12:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[4][1], datetime.strptime('2013-02-19 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[5][0], datetime.strptime('2013-02-22 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[5][1], datetime.strptime('2013-02-22 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[6][0], datetime.strptime('2013-02-22 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[6][1], datetime.strptime('2013-02-22 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[7][0], datetime.strptime('2013-03-01 11:30:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[7][1], datetime.strptime('2013-03-01 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[8][0], datetime.strptime('2013-03-01 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[8][1], datetime.strptime('2013-03-01 22:30:00', _format), 'resource_calendar: wrong hours scheduling')
td = timedelta()
for item in res:
td += item[1] - item[0]
self.assertEqual(seconds(td) / 3600.0, 40.0, 'resource_calendar: wrong hours scheduling')
# --------------------------------------------------
# Test3: working hours (old _interval_hours_get)
# --------------------------------------------------
# old API: resource without leaves
# res: 2 weeks -> 40 hours
res = self.resource_calendar._interval_hours_get(
cr, uid, self.calendar_id,
self.date1.replace(hour=6, minute=0),
self.date2.replace(hour=23, minute=0) + relativedelta(days=7),
resource_id=self.resource1_id, exclude_leaves=True)
self.assertEqual(res, 40.0, 'resource_calendar: wrong _interval_hours_get compatibility computation')
# new API: resource without leaves
# res: 2 weeks -> 40 hours
res = self.resource_calendar.get_working_hours(
cr, uid, self.calendar_id,
self.date1.replace(hour=6, minute=0),
self.date2.replace(hour=23, minute=0) + relativedelta(days=7),
compute_leaves=False, resource_id=self.resource1_id)
self.assertEqual(res, 40.0, 'resource_calendar: wrong get_working_hours computation')
# old API: resource and leaves
# res: 2 weeks -> 40 hours - (3+4) leave hours
res = self.resource_calendar._interval_hours_get(
cr, uid, self.calendar_id,
self.date1.replace(hour=6, minute=0),
self.date2.replace(hour=23, minute=0) + relativedelta(days=7),
resource_id=self.resource1_id, exclude_leaves=False)
self.assertEqual(res, 33.0, 'resource_calendar: wrong _interval_hours_get compatibility computation')
# new API: resource and leaves
# res: 2 weeks -> 40 hours - (3+4) leave hours
res = self.resource_calendar.get_working_hours(
cr, uid, self.calendar_id,
self.date1.replace(hour=6, minute=0),
self.date2.replace(hour=23, minute=0) + relativedelta(days=7),
compute_leaves=True, resource_id=self.resource1_id)
self.assertEqual(res, 33.0, 'resource_calendar: wrong get_working_hours computation')
# --------------------------------------------------
# Test4: misc
# --------------------------------------------------
# Test without calendar and default_interval
res = self.resource_calendar.get_working_hours(
cr, uid, None,
self.date1.replace(hour=6, minute=0),
self.date2.replace(hour=23, minute=0),
compute_leaves=True, resource_id=self.resource1_id,
default_interval=(8, 16))
self.assertEqual(res, 32.0, 'resource_calendar: wrong get_working_hours computation')
def test_50_calendar_schedule_days(self):
""" Testing calendar days scheduling """
cr, uid = self.cr, self.uid
_format = '%Y-%m-%d %H:%M:%S'
# --------------------------------------------------
# Test1: with calendar
# --------------------------------------------------
res = self.resource_calendar.schedule_days_get_date(cr, uid, self.calendar_id, 5, day_date=self.date1)
self.assertEqual(res.date(), datetime.strptime('2013-02-26 00:00:00', _format).date(), 'resource_calendar: wrong days scheduling')
res = self.resource_calendar.schedule_days_get_date(
cr, uid, self.calendar_id, 5, day_date=self.date1,
compute_leaves=True, resource_id=self.resource1_id)
self.assertEqual(res.date(), datetime.strptime('2013-03-01 00:00:00', _format).date(), 'resource_calendar: wrong days scheduling')
# --------------------------------------------------
# Test2: misc
# --------------------------------------------------
# Without calendar, should only count days -> 12 -> 16, 5 days with default intervals
res = self.resource_calendar.schedule_days_get_date(cr, uid, None, 5, day_date=self.date1, default_interval=(8, 16))
self.assertEqual(res, datetime.strptime('2013-02-16 16:00:00', _format), 'resource_calendar: wrong days scheduling')
def seconds(td):
assert isinstance(td, timedelta)
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10.**6
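# Quick sanity sketch for seconds() (values are illustrative):
#   seconds(timedelta(hours=1)) == 3600.0
#   seconds(timedelta(days=1, microseconds=500000)) == 86400.5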
| agpl-3.0 |
gVallverdu/pymatgen | pymatgen/analysis/gb/grain.py | 2 | 117980 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Module containing classes to generate grain boundaries.
"""
import numpy as np
from fractions import Fraction
from math import gcd, floor, cos
from functools import reduce
from pymatgen import Structure, Lattice
from pymatgen.core.sites import PeriodicSite
from monty.fractions import lcm
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
import itertools
import logging
import warnings
# This module implements representations of grain boundaries, as well as
# algorithms for generating them.
__author__ = "Xiang-Guo Li"
__copyright__ = "Copyright 2018, The Materials Virtual Lab"
__version__ = "0.1"
__maintainer__ = "Xiang-Guo Li"
__email__ = "xil110@ucsd.edu"
__date__ = "7/30/18"
logger = logging.getLogger(__name__)
class GrainBoundary(Structure):
"""
Subclass of Structure representing a GrainBoundary (gb) object.
Implements additional attributes pertaining to gbs, but the
init method does not actually implement any algorithm that
creates a gb. This is a DUMMY class whose init method only holds
information about the gb. It also has additional methods that return
other information about a gb, such as the sigma value.
Note that all gbs have the gb surface normal oriented in the c-direction.
This means the lattice vectors a and b are in the gb surface plane (at
least for one grain) and the c vector is out of the surface plane
(though not necessarily perpendicular to the surface).
"""
def __init__(self, lattice, species, coords, rotation_axis, rotation_angle,
gb_plane, join_plane, init_cell, vacuum_thickness, ab_shift,
site_properties, oriented_unit_cell, validate_proximity=False,
coords_are_cartesian=False):
"""
Makes a gb structure, a structure object with additional information
and methods pertaining to gbs.
Args:
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
species ([Specie]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / specie specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Specie objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
rotation_axis (list): Rotation axis of GB in the form of a list of
integers, e.g. [1, 1, 0].
rotation_angle (float, in unit of degree): rotation angle of GB.
gb_plane (list): Grain boundary plane in the form of a list of integers
e.g.: [1, 2, 3].
join_plane (list): Joining plane of the second grain in the form of a list of
integers. e.g.: [1, 2, 3].
init_cell (Structure): initial bulk structure to form the GB.
site_properties (dict): Properties associated with the sites as a
dict of sequences. The sequences have to be the same length as
the atomic species and fractional_coords. For gb, you should
have the 'grain_label' properties to classify the sites as 'top',
'bottom', 'top_incident', or 'bottom_incident'.
vacuum_thickness (float in angstrom): The thickness of vacuum inserted
between two grains of the GB.
ab_shift (list of float, in unit of crystal vector a, b): The relative
shift along a, b vectors.
oriented_unit_cell (Structure): oriented unit cell of the bulk init_cell.
Helps to accurately calculate the bulk properties that are consistent
with gb calculations.
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
"""
self.oriented_unit_cell = oriented_unit_cell
self.rotation_axis = rotation_axis
self.rotation_angle = rotation_angle
self.gb_plane = gb_plane
self.join_plane = join_plane
self.init_cell = init_cell
self.vacuum_thickness = vacuum_thickness
self.ab_shift = ab_shift
super().__init__(
lattice, species, coords, validate_proximity=validate_proximity,
coords_are_cartesian=coords_are_cartesian,
site_properties=site_properties)
def copy(self):
"""
Convenience method to get a copy of the structure, with options to add
site properties.
Returns:
A copy of the Structure, with optionally new site_properties and
optionally sanitized.
"""
return GrainBoundary(self.lattice, self.species_and_occu, self.frac_coords,
self.rotation_axis, self.rotation_angle, self.gb_plane,
self.join_plane, self.init_cell, self.vacuum_thickness,
self.ab_shift, self.site_properties, self.oriented_unit_cell)
def get_sorted_structure(self, key=None, reverse=False):
"""
Get a sorted copy of the structure. The parameters have the same
meaning as in list.sort. By default, sites are sorted by the
electronegativity of the species. Note that GrainBoundary has to override this
because of the different __init__ args.
Args:
key: Specifies a function of one argument that is used to extract
a comparison key from each list element: key=str.lower. The
default value is None (compare the elements directly).
reverse (bool): If set to True, then the list elements are sorted
as if each comparison were reversed.
"""
sites = sorted(self, key=key, reverse=reverse)
s = Structure.from_sites(sites)
return GrainBoundary(s.lattice, s.species_and_occu, s.frac_coords,
self.rotation_axis, self.rotation_angle, self.gb_plane,
self.join_plane, self.init_cell, self.vacuum_thickness,
self.ab_shift, self.site_properties, self.oriented_unit_cell)
@property
def sigma(self):
"""
This method returns the sigma value of the gb.
If using 'quick_gen' to generate GB, this value is not valid.
"""
return int(round(self.oriented_unit_cell.volume / self.init_cell.volume))
@property
def sigma_from_site_prop(self):
"""
This method returns the sigma value of the gb from site properties.
If the GB structure merged some atoms because they were too close to
each other, this property will not work.
"""
num_coi = 0
if None in self.site_properties['grain_label']:
raise RuntimeError('Sites were merged, this property does not work')
for tag in self.site_properties['grain_label']:
if 'incident' in tag:
num_coi += 1
return int(round(self.num_sites / num_coi))
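# Illustrative arithmetic (the numbers are hypothetical): a GB supercell
# with 40 sites, 8 of which carry an 'incident' grain_label, gives
# int(round(40 / 8)) = 5, i.e. a sigma-5 boundary.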
@property
def top_grain(self):
"""
return the top grain (Structure) of the GB.
"""
top_sites = []
for i, tag in enumerate(self.site_properties['grain_label']):
if 'top' in tag:
top_sites.append(self.sites[i])
return Structure.from_sites(top_sites)
@property
def bottom_grain(self):
"""
return the bottom grain (Structure) of the GB.
"""
bottom_sites = []
for i, tag in enumerate(self.site_properties['grain_label']):
if 'bottom' in tag:
bottom_sites.append(self.sites[i])
return Structure.from_sites(bottom_sites)
@property
def coincidents(self):
"""
Return a list of coincident sites.
"""
coincident_sites = []
for i, tag in enumerate(self.site_properties['grain_label']):
if 'incident' in tag:
coincident_sites.append(self.sites[i])
return coincident_sites
def __str__(self):
comp = self.composition
outs = [
"Gb Summary (%s)" % comp.formula,
"Reduced Formula: %s" % comp.reduced_formula,
"Rotation axis: %s" % (self.rotation_axis,),
"Rotation angle: %s" % (self.rotation_angle,),
"GB plane: %s" % (self.gb_plane,),
"Join plane: %s" % (self.join_plane,),
"vacuum thickness: %s" % (self.vacuum_thickness,),
"ab_shift: %s" % (self.ab_shift,), ]
def to_s(x, rjust=10):
return ("%0.6f" % x).rjust(rjust)
outs.append("abc : " + " ".join([to_s(i) for i in self.lattice.abc]))
outs.append("angles: " + " ".join([to_s(i) for i in self.lattice.angles]))
outs.append("Sites ({i})".format(i=len(self)))
for i, site in enumerate(self):
outs.append(" ".join([str(i + 1), site.species_string, " ".join([to_s(j, 12) for j in site.frac_coords])]))
return "\n".join(outs)
def as_dict(self):
"""
Returns:
Dictionary representation of GrainBoundary object
"""
d = super().as_dict()
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
d["init_cell"] = self.init_cell.as_dict()
d["rotation_axis"] = self.rotation_axis
d["rotation_angle"] = self.rotation_angle
d["gb_plane"] = self.gb_plane
d["join_plane"] = self.join_plane
d["vacuum_thickness"] = self.vacuum_thickness
d["ab_shift"] = self.ab_shift
d["oriented_unit_cell"] = self.oriented_unit_cell.as_dict()
return d
@classmethod
def from_dict(cls, d):
"""
Generates a GrainBoundary object from a dictionary created by as_dict().
Args:
d: dict
Returns:
GrainBoundary object
"""
lattice = Lattice.from_dict(d["lattice"])
sites = [PeriodicSite.from_dict(sd, lattice) for sd in d["sites"]]
s = Structure.from_sites(sites)
return GrainBoundary(
lattice=lattice,
species=s.species_and_occu, coords=s.frac_coords,
rotation_axis=d["rotation_axis"],
rotation_angle=d["rotation_angle"],
gb_plane=d["gb_plane"],
join_plane=d["join_plane"],
init_cell=Structure.from_dict(d["init_cell"]),
vacuum_thickness=d["vacuum_thickness"],
ab_shift=d["ab_shift"],
oriented_unit_cell=Structure.from_dict(d["oriented_unit_cell"]),
site_properties=s.site_properties)
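# A minimal round-trip sketch (assumes an existing GrainBoundary `gb`):
# following pymatgen's usual as_dict()/from_dict() pattern,
#   d = gb.as_dict()
#   gb2 = GrainBoundary.from_dict(d)
# rebuilds an equivalent structure together with the GB metadata above.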
class GrainBoundaryGenerator:
"""
This class is to generate grain boundaries (GBs) from bulk
conventional cell (fcc, bcc can from the primitive cell), and works for Cubic,
Tetragonal, Orthorhombic, Rhombohedral, and Hexagonal systems.
It generate GBs from given parameters, which includes
GB plane, rotation axis, rotation angle.
This class works for any general GB, including twist, tilt and mixed GBs.
The three parameters, rotation axis, GB plane and rotation angle, are
sufficient to identify one unique GB. While sometimes, users may not be able
to tell what exactly rotation angle is but prefer to use sigma as an parameter,
this class also provides the function that is able to return all possible
rotation angles for a specific sigma value.
The same sigma value (with rotation axis fixed) can correspond to
multiple rotation angles.
Users can use structure matcher in pymatgen to get rid of the redundant structures.
"""
def __init__(self, initial_structure, symprec=0.1, angle_tolerance=1):
"""
initial_structure (Structure): Initial input structure. It can
be conventional or primitive cell (primitive cell works for bcc and fcc).
For fcc and bcc, using conventional cell can lead to a non-primitive
grain boundary structure.
This code supports Cubic, Tetragonal, Orthorhombic, Rhombohedral, and
Hexagonal systems.
symprec (float): Tolerance for symmetry finding. Defaults to 0.1 (the value used
in Materials Project), which is for structures with slight deviations
from their proper atomic positions (e.g., structures relaxed with
electronic structure codes).
A smaller value of 0.01 is often used for properly refined
structures with atoms in the proper symmetry coordinates.
User should make sure the symmetry is what you want.
angle_tolerance (float): Angle tolerance for symmetry finding.
"""
analyzer = SpacegroupAnalyzer(initial_structure, symprec, angle_tolerance)
self.lat_type = analyzer.get_lattice_type()[0]
if (self.lat_type == 't'):
# need to use the conventional cell for tetragonal
initial_structure = analyzer.get_conventional_standard_structure()
a, b, c = initial_structure.lattice.abc
# c axis of tetragonal structure not in the third direction
if abs(a - b) > symprec:
# a == c, rotate b to the third direction
if abs(a - c) < symprec:
initial_structure.make_supercell([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
# b == c, rotate a to the third direction
else:
initial_structure.make_supercell([[0, 1, 0], [0, 0, 1], [1, 0, 0]])
elif (self.lat_type == 'h'):
alpha, beta, gamma = initial_structure.lattice.angles
# c axis is not in the third direction
if (abs(gamma - 90) < angle_tolerance):
# alpha = 120 or 60, rotate b, c to a, b vectors
if (abs(alpha - 90) > angle_tolerance):
initial_structure.make_supercell([[0, 1, 0], [0, 0, 1], [1, 0, 0]])
# beta = 120 or 60, rotate c, a to a, b vectors
elif (abs(beta - 90) > angle_tolerance):
initial_structure.make_supercell([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
elif (self.lat_type == 'r'):
# need to use primitive cell for rhombohedra
initial_structure = analyzer.get_primitive_standard_structure()
elif (self.lat_type == 'o'):
# need to use the conventional cell for orthorhombic
initial_structure = analyzer.get_conventional_standard_structure()
self.initial_structure = initial_structure
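# A hedged usage sketch (the bulk structure and the numeric angle are
# illustrative): given a cubic bulk Structure `bulk`, a sigma-5 [100]
# twist GB could be built roughly as
#   gbg = GrainBoundaryGenerator(bulk)
#   gb = gbg.gb_from_parameters([1, 0, 0], 36.87, expand_times=2)
# where 36.87 only approximates the sigma-5 rotation angle; the accurate
# value should be taken from get_rotation_angle_from_sigma(), as noted in
# the docstring below.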
def gb_from_parameters(self, rotation_axis, rotation_angle, expand_times=4, vacuum_thickness=0.0,
ab_shift=[0, 0], normal=False, ratio=None, plane=None, max_search=20,
tol_coi=1.e-8, rm_ratio=0.7, quick_gen=False):
"""
Args:
rotation_axis (list): Rotation axis of GB in the form of a list of integer
e.g.: [1, 1, 0]
rotation_angle (float, in unit of degree): rotation angle used to generate GB.
Make sure the angle is accurate enough. You can use the enum* functions
in this class to extract the accurate angle.
e.g.: The rotation angle of sigma 3 twist GB with the rotation axis
[1, 1, 1] and GB plane (1, 1, 1) can be 60.000000000 degree.
If you do not know the rotation angle, but know the sigma value, we have
provide the function get_rotation_angle_from_sigma which is able to return
all the rotation angles of sigma value you provided.
expand_times (int): The multiple times used to expand one unit grain to larger grain.
This is used to tune the grain length of GB to warrant that the two GBs in one
cell do not interact with each other. Default set to 4.
vacuum_thickness (float, in angstrom): The thickness of vacuum that you want to insert
between two grains of the GB. Default to 0.
ab_shift (list of float, in unit of a, b vectors of Gb): in plane shift of two grains
normal (bool):
determine whether the c axis of the top grain (first transformation matrix)
needs to be perpendicular to the surface.
Defaults to False.
ratio (list of integers):
lattice axial ratio.
For cubic system, ratio is not needed.
For tetragonal system, ratio = [mu, mv], list of two integers,
that is, mu/mv = c2/a2. If it is irrational, set it to None.
For orthorhombic system, ratio = [mu, lam, mv], list of three integers,
that is, mu:lam:mv = c2:b2:a2. If irrational for one axis, set it to None.
e.g. mu:lam:mv = c2,None,a2, means b2 is irrational.
For rhombohedral system, ratio = [mu, mv], list of two integers,
that is, mu/mv is the ratio of (1+2*cos(alpha))/cos(alpha).
If irrational, set it to None.
For hexagonal system, ratio = [mu, mv], list of two integers,
that is, mu/mv = c2/a2. If it is irrational, set it to None.
This code also supplies a class method to generate the ratio from the
structure (get_ratio). User can also make their own approximation and
input the ratio directly.
plane (list): Grain boundary plane in the form of a list of integers
e.g.: [1, 2, 3]. If None, we set it as a twist GB. The plane will be perpendicular
to the rotation axis.
max_search (int): max search for the GB lattice vectors that give the smallest GB
lattice. If normal is True, also max search the GB c vector that is perpendicular
to the plane. For a complex GB, if you want to speed up, you can reduce this value.
But too small a value may lead to an error.
tol_coi (float): tolerance to find the coincidence sites. When making approximations to
the ratio needed to generate the GB, you may need to increase this tolerance to
obtain the correct number of coincidence sites. To check whether the number of
coincidence sites is correct, compare the generated GB object's sigma_from_site_prop
with the enum* sigma values (what you expected from the input).
rm_ratio (float): the criterion for removing atoms that are too close to each other.
Atoms closer than rm_ratio*bond_length (bond length of the bulk system) are
removed. Default to 0.7.
quick_gen (bool): whether to quickly generate a supercell; if set to True, the
smallest cell is not searched for.
Returns:
Grain boundary structure (gb object).
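Example (an illustrative usage sketch, not from the original docs; the
rotation angle matches the sigma 3 twist GB example above, and `gbg` is
an assumed generator instance):
    gbg = GrainBoundaryGenerator(initial_structure)
    gb = gbg.gb_from_parameters(rotation_axis=[1, 1, 1], rotation_angle=60.0,
                                expand_times=2, plane=[1, 1, 1])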
"""
lat_type = self.lat_type
# if the initial structure is primitive cell in cubic system,
# calculate the transformation matrix from its conventional cell
# to primitive cell, basically for bcc and fcc systems.
trans_cry = np.eye(3)
if lat_type == 'c':
analyzer = SpacegroupAnalyzer(self.initial_structure)
convention_cell = analyzer.get_conventional_standard_structure()
vol_ratio = self.initial_structure.volume / convention_cell.volume
# bcc primitive cell, belong to cubic system
if abs(vol_ratio - 0.5) < 1.e-3:
trans_cry = np.array([[0.5, 0.5, -0.5], [-0.5, 0.5, 0.5], [0.5, -0.5, 0.5]])
logger.info("Make sure this is for cubic with bcc primitive cell")
# fcc primitive cell, belong to cubic system
elif abs(vol_ratio - 0.25) < 1.e-3:
trans_cry = np.array([[0.5, 0.5, 0], [0, 0.5, 0.5], [0.5, 0, 0.5]])
logger.info("Make sure this is for cubic with fcc primitive cell")
else:
logger.info("Make sure this is for cubic with conventional cell")
elif lat_type == 't':
logger.info("Make sure this is for tetragonal system")
if ratio is None:
logger.info('Make sure this is for irrational c2/a2')
elif len(ratio) != 2:
raise RuntimeError('Tetragonal system needs correct c2/a2 ratio')
elif lat_type == 'o':
logger.info('Make sure this is for orthorhombic system')
if ratio is None:
raise RuntimeError('CSL does not exist if all axial ratios are irrational '
'for an orthorhombic system')
elif len(ratio) != 3:
raise RuntimeError('Orthorhombic system needs correct c2:b2:a2 ratio')
elif lat_type == 'h':
logger.info('Make sure this is for hexagonal system')
if ratio is None:
logger.info('Make sure this is for irrational c2/a2')
elif len(ratio) != 2:
raise RuntimeError('Hexagonal system needs correct c2/a2 ratio')
elif lat_type == 'r':
logger.info('Make sure this is for rhombohedral system')
if ratio is None:
logger.info('Make sure this is for irrational (1+2*cos(alpha))/cos(alpha) ratio')
elif len(ratio) != 2:
raise RuntimeError('Rhombohedral system needs correct '
'(1+2*cos(alpha))/cos(alpha) ratio')
else:
raise RuntimeError('Lattice type not implemented. This code works for cubic, '
'tetragonal, orthorhombic, rhombohedral, hexagonal systems')
# transform four index notation to three index notation for hexagonal and rhombohedral
if len(rotation_axis) == 4:
u1 = rotation_axis[0]
v1 = rotation_axis[1]
w1 = rotation_axis[3]
if lat_type.lower() == 'h':
u = 2 * u1 + v1
v = 2 * v1 + u1
w = w1
rotation_axis = [u, v, w]
elif lat_type.lower() == 'r':
u = 2 * u1 + v1 + w1
v = v1 + w1 - u1
w = w1 - 2 * v1 - u1
rotation_axis = [u, v, w]
# make sure gcd(rotation_axis)==1
if reduce(gcd, rotation_axis) != 1:
rotation_axis = [int(round(x / reduce(gcd, rotation_axis))) for x in rotation_axis]
# transform four index notation to three index notation for plane
if plane is not None:
if len(plane) == 4:
u1 = plane[0]
v1 = plane[1]
w1 = plane[3]
plane = [u1, v1, w1]
# set the plane for grain boundary when plane is None.
if plane is None:
if lat_type.lower() == 'c':
plane = rotation_axis
else:
if lat_type.lower() == 'h':
if ratio is None:
c2_a2_ratio = 1
else:
c2_a2_ratio = ratio[0] / ratio[1]
metric = np.array([[1, -0.5, 0], [-0.5, 1, 0], [0, 0, c2_a2_ratio]])
elif lat_type.lower() == 'r':
if ratio is None:
cos_alpha = 0.5
else:
cos_alpha = 1.0 / (ratio[0] / ratio[1] - 2)
metric = np.array([[1, cos_alpha, cos_alpha], [cos_alpha, 1, cos_alpha],
[cos_alpha, cos_alpha, 1]])
elif lat_type.lower() == 't':
if ratio is None:
c2_a2_ratio = 1
else:
c2_a2_ratio = ratio[0] / ratio[1]
metric = np.array([[1, 0, 0], [0, 1, 0], [0, 0, c2_a2_ratio]])
elif lat_type.lower() == 'o':
for i in range(3):
if ratio[i] is None:
ratio[i] = 1
metric = np.array([[1, 0, 0], [0, ratio[1] / ratio[2], 0],
[0, 0, ratio[0] / ratio[2]]])
else:
raise RuntimeError('Lattice type not implemented.')
plane = np.matmul(rotation_axis, metric)
fractions = [Fraction(x).limit_denominator() for x in plane]
least_mul = reduce(lcm, [f.denominator for f in fractions])
plane = [int(round(x * least_mul)) for x in plane]
if reduce(gcd, plane) != 1:
index = reduce(gcd, plane)
plane = [int(round(x / index)) for x in plane]
t1, t2 = self.get_trans_mat(r_axis=rotation_axis, angle=rotation_angle, normal=normal,
trans_cry=trans_cry, lat_type=lat_type, ratio=ratio,
surface=plane, max_search=max_search, quick_gen=quick_gen)
# find the join_plane
if lat_type.lower() != 'c':
if lat_type.lower() == 'h':
if ratio is None:
mu, mv = [1, 1]
else:
mu, mv = ratio
trans_cry1 = np.array([[1, 0, 0], [-0.5, np.sqrt(3.0) / 2.0, 0],
[0, 0, np.sqrt(mu / mv)]])
elif lat_type.lower() == 'r':
if ratio is None:
c2_a2_ratio = 1
else:
mu, mv = ratio
c2_a2_ratio = 3.0 / (2 - 6 * mv / mu)
trans_cry1 = np.array([[0.5, np.sqrt(3.0) / 6.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)],
[-0.5, np.sqrt(3.0) / 6.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)],
[0, -1 * np.sqrt(3.0) / 3.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)]])
else:
if lat_type.lower() == 't':
if ratio is None:
mu, mv = [1, 1]
else:
mu, mv = ratio
lam = mv
elif lat_type.lower() == 'o':
new_ratio = [1 if v is None else v for v in ratio]
mu, lam, mv = new_ratio
trans_cry1 = np.array([[1, 0, 0], [0, np.sqrt(lam / mv), 0], [0, 0, np.sqrt(mu / mv)]])
else:
trans_cry1 = trans_cry
grain_matrix = np.dot(t2, trans_cry1)
plane_init = np.cross(grain_matrix[0], grain_matrix[1])
if lat_type.lower() != 'c':
plane_init = np.dot(plane_init, trans_cry1.T)
join_plane = self.vec_to_surface(plane_init)
parent_structure = self.initial_structure.copy()
# calculate the bond_length in bulk system.
if len(parent_structure) == 1:
temp_str = parent_structure.copy()
temp_str.make_supercell([1, 1, 2])
distance = temp_str.distance_matrix
else:
distance = parent_structure.distance_matrix
bond_length = np.min(distance[np.nonzero(distance)])
# top grain
top_grain = fix_pbc(parent_structure * t1)
# obtain the smallest oriented cell
if normal and not quick_gen:
t_temp = self.get_trans_mat(r_axis=rotation_axis, angle=rotation_angle, normal=False,
trans_cry=trans_cry, lat_type=lat_type, ratio=ratio,
surface=plane, max_search=max_search)
oriented_unit_cell = fix_pbc(parent_structure * t_temp[0])
t_matrix = oriented_unit_cell.lattice.matrix
normal_v_plane = np.cross(t_matrix[0], t_matrix[1])
unit_normal_v = normal_v_plane / np.linalg.norm(normal_v_plane)
unit_ab_adjust = (t_matrix[2] - np.dot(unit_normal_v, t_matrix[2]) * unit_normal_v) \
/ np.dot(unit_normal_v, t_matrix[2])
else:
oriented_unit_cell = top_grain.copy()
unit_ab_adjust = 0.0
# bottom grain, using top grain's lattice matrix
bottom_grain = fix_pbc(parent_structure * t2, top_grain.lattice.matrix)
# label both grains with 'top','bottom','top_incident','bottom_incident'
n_sites = top_grain.num_sites
t_and_b = Structure(top_grain.lattice, top_grain.species + bottom_grain.species,
list(top_grain.frac_coords) + list(bottom_grain.frac_coords))
t_and_b_dis = t_and_b.lattice.get_all_distances(t_and_b.frac_coords[0:n_sites],
t_and_b.frac_coords[n_sites:n_sites * 2])
index_incident = np.nonzero(t_and_b_dis < np.min(t_and_b_dis) + tol_coi)
top_labels = []
for i in range(n_sites):
if i in index_incident[0]:
top_labels.append('top_incident')
else:
top_labels.append('top')
bottom_labels = []
for i in range(n_sites):
if i in index_incident[1]:
bottom_labels.append('bottom_incident')
else:
bottom_labels.append('bottom')
top_grain = Structure(Lattice(top_grain.lattice.matrix), top_grain.species,
top_grain.frac_coords, site_properties={'grain_label': top_labels})
bottom_grain = Structure(Lattice(bottom_grain.lattice.matrix), bottom_grain.species,
bottom_grain.frac_coords, site_properties={'grain_label': bottom_labels})
# expand both grains
top_grain.make_supercell([1, 1, expand_times])
bottom_grain.make_supercell([1, 1, expand_times])
top_grain = fix_pbc(top_grain)
bottom_grain = fix_pbc(bottom_grain)
# determine the top-grain location.
edge_b = 1.0 - max(bottom_grain.frac_coords[:, 2])
edge_t = 1.0 - max(top_grain.frac_coords[:, 2])
c_adjust = (edge_t - edge_b) / 2.0
# construct all species
all_species = []
all_species.extend([site.specie for site in bottom_grain])
all_species.extend([site.specie for site in top_grain])
half_lattice = top_grain.lattice
# calculate translation vector, perpendicular to the plane
normal_v_plane = np.cross(half_lattice.matrix[0], half_lattice.matrix[1])
unit_normal_v = normal_v_plane / np.linalg.norm(normal_v_plane)
translation_v = unit_normal_v * vacuum_thickness
# construct the final lattice
whole_matrix_no_vac = np.array(half_lattice.matrix)
whole_matrix_no_vac[2] = half_lattice.matrix[2] * 2
whole_matrix_with_vac = whole_matrix_no_vac.copy()
whole_matrix_with_vac[2] = whole_matrix_no_vac[2] + translation_v * 2
whole_lat = Lattice(whole_matrix_with_vac)
# construct the coords, move top grain with translation_v
all_coords = []
grain_labels = bottom_grain.site_properties['grain_label'] + top_grain.site_properties['grain_label']
for site in bottom_grain:
all_coords.append(site.coords)
for site in top_grain:
all_coords.append(site.coords + half_lattice.matrix[2] * (1 + c_adjust) +
unit_ab_adjust * np.linalg.norm(half_lattice.matrix[2] * (1 + c_adjust)) +
translation_v + ab_shift[0] * whole_matrix_with_vac[0] +
ab_shift[1] * whole_matrix_with_vac[1])
gb_with_vac = Structure(whole_lat, all_species, all_coords,
coords_are_cartesian=True,
site_properties={'grain_label': grain_labels})
# merge atoms that are too close; extract atoms near the GB.
cos_c_norm_plane = np.dot(unit_normal_v, whole_matrix_with_vac[2]) / whole_lat.c
range_c_len = abs(bond_length / cos_c_norm_plane / whole_lat.c)
sites_near_gb = []
sites_away_gb = []
for site in gb_with_vac.sites:
if site.frac_coords[2] < range_c_len or site.frac_coords[2] > 1 - range_c_len \
or (site.frac_coords[2] > 0.5 - range_c_len and site.frac_coords[2] < 0.5 + range_c_len):
sites_near_gb.append(site)
else:
sites_away_gb.append(site)
if len(sites_near_gb) >= 1:
s_near_gb = Structure.from_sites(sites_near_gb)
s_near_gb.merge_sites(tol=bond_length * rm_ratio, mode='d')
all_sites = sites_away_gb + s_near_gb.sites
gb_with_vac = Structure.from_sites(all_sites)
# move coordinates into the periodic cell.
gb_with_vac = fix_pbc(gb_with_vac, whole_lat.matrix)
return GrainBoundary(whole_lat, gb_with_vac.species, gb_with_vac.cart_coords, rotation_axis,
rotation_angle, plane, join_plane, self.initial_structure,
vacuum_thickness, ab_shift, site_properties=gb_with_vac.site_properties,
oriented_unit_cell=oriented_unit_cell,
coords_are_cartesian=True)
def get_ratio(self, max_denominator=5, index_none=None):
"""
find the axial ratio needed for GB generator input.
Args:
max_denominator (int): the maximum denominator for
the computed ratio, default to be 5.
index_none (int): specify the irrational axis.
0-a, 1-b, 2-c. May only be needed for the orthorhombic system.
Returns:
axial ratio needed for GB generator (list of integers).
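Example (a hypothetical sketch; assumes `gbg` was built from a tetragonal
structure whose c2/a2 is close to a simple fraction):
    ratio = gbg.get_ratio(max_denominator=5)
    # returns [mu, mv] such that mu/mv approximates c2/a2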
"""
structure = self.initial_structure
lat_type = self.lat_type
if lat_type == 't' or lat_type == 'h':
# For tetragonal and hexagonal system, ratio = c2 / a2.
a, c = (structure.lattice.a, structure.lattice.c)
if c > a:
frac = Fraction(c ** 2 / a ** 2).limit_denominator(max_denominator)
ratio = [frac.numerator, frac.denominator]
else:
frac = Fraction(a ** 2 / c ** 2).limit_denominator(max_denominator)
ratio = [frac.denominator, frac.numerator]
elif lat_type == 'r':
# For rhombohedral system, ratio = (1 + 2 * cos(alpha)) / cos(alpha).
cos_alpha = cos(structure.lattice.alpha / 180 * np.pi)
frac = Fraction((1 + 2 * cos_alpha) / cos_alpha).limit_denominator(max_denominator)
ratio = [frac.numerator, frac.denominator]
elif lat_type == 'o':
# For orthorhombic system, ratio = c2:b2:a2. If irrational for one axis, set it to None.
ratio = [None] * 3
lat = (structure.lattice.c, structure.lattice.b, structure.lattice.a)
index = [0, 1, 2]
if index_none is None:
min_index = np.argmin(lat)
index.pop(min_index)
frac1 = Fraction(lat[index[0]] ** 2 / lat[min_index] ** 2).limit_denominator(max_denominator)
frac2 = Fraction(lat[index[1]] ** 2 / lat[min_index] ** 2).limit_denominator(max_denominator)
com_lcm = lcm(frac1.denominator, frac2.denominator)
ratio[min_index] = com_lcm
ratio[index[0]] = frac1.numerator * int(round((com_lcm / frac1.denominator)))
ratio[index[1]] = frac2.numerator * int(round((com_lcm / frac2.denominator)))
else:
index.pop(index_none)
if (lat[index[0]] > lat[index[1]]):
frac = Fraction(lat[index[0]] ** 2 / lat[index[1]] ** 2).limit_denominator(max_denominator)
ratio[index[0]] = frac.numerator
ratio[index[1]] = frac.denominator
else:
frac = Fraction(lat[index[1]] ** 2 / lat[index[0]] ** 2).limit_denominator(max_denominator)
ratio[index[1]] = frac.numerator
ratio[index[0]] = frac.denominator
elif lat_type == 'c':
raise RuntimeError('Cubic system does not need axial ratio.')
else:
raise RuntimeError('Lattice type not implemented.')
return ratio
@staticmethod
def get_trans_mat(r_axis, angle, normal=False, trans_cry=np.eye(3), lat_type='c',
ratio=None, surface=None, max_search=20, quick_gen=False):
"""
Find the two transformation matrix for each grain from given rotation axis,
GB plane, rotation angle and corresponding ratio (see explanation for ratio
below).
The structure of each grain can be obtained by applying the corresponding
transformation matrix to the conventional cell.
The algorithm for this code is from the reference Acta Cryst. A32, 783 (1976).
Args:
r_axis (list of three integers, e.g. u, v, w
or four integers, e.g. u, v, t, w for hex/rho system only):
the rotation axis of the grain boundary.
angle (float, in unit of degree) :
the rotation angle of the grain boundary
normal (bool):
whether to require the c axis of the grain associated with the
first transformation matrix to be perpendicular to the surface.
Default to False.
trans_cry (3 by 3 array):
if the structure given are primitive cell in cubic system, e.g.
bcc or fcc system, trans_cry is the transformation matrix from its
conventional cell to the primitive cell.
lat_type (one character):
'c' or 'C': cubic system
't' or 'T': tetragonal system
'o' or 'O': orthorhombic system
'h' or 'H': hexagonal system
'r' or 'R': rhombohedral system
default to cubic system
ratio (list of integers):
lattice axial ratio.
For cubic system, ratio is not needed.
For tetragonal system, ratio = [mu, mv], list of two integers,
that is, mu/mv = c2/a2. If it is irrational, set it to none.
For orthorhombic system, ratio = [mu, lam, mv], list of three integers,
that is, mu:lam:mv = c2:b2:a2. If irrational for one axis, set it to None.
e.g. mu:lam:mv = c2,None,a2, means b2 is irrational.
For rhombohedral system, ratio = [mu, mv], list of two integers,
that is, mu/mv is the ratio of (1+2*cos(alpha))/cos(alpha).
If irrational, set it to None.
For hexagonal system, ratio = [mu, mv], list of two integers,
that is, mu/mv = c2/a2. If it is irrational, set it to none.
surface (list of three integers, e.g. h, k, l
or four integers, e.g. h, k, i, l for hex/rho system only):
the miller index of the grain boundary plane, in the format [h,k,l].
If surface is not given, the default is perpendicular to r_axis, which
gives a twist grain boundary.
max_search (int): max search range for the GB lattice vectors that give the smallest GB
lattice. If normal is true, also the max search range for a GB c vector perpendicular
to the plane.
quick_gen (bool): whether to quickly generate a supercell; if set to True, the
smallest cell is not searched for.
Returns:
t1 (3 by 3 integer array):
The transformation array for one grain.
t2 (3 by 3 integer array):
The transformation array for the other grain.
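Example (an illustrative sketch; the axis/angle pair follows the documented
sigma 3 cubic case, the surface choice is an assumption):
    t1, t2 = GrainBoundaryGenerator.get_trans_mat(
        r_axis=[1, 1, 1], angle=60.0, lat_type='c', surface=[1, 1, 1])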
"""
# transform four index notation to three index notation
if len(r_axis) == 4:
u1 = r_axis[0]
v1 = r_axis[1]
w1 = r_axis[3]
if lat_type.lower() == 'h':
u = 2 * u1 + v1
v = 2 * v1 + u1
w = w1
r_axis = [u, v, w]
elif lat_type.lower() == 'r':
u = 2 * u1 + v1 + w1
v = v1 + w1 - u1
w = w1 - 2 * v1 - u1
r_axis = [u, v, w]
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
if surface is not None:
if len(surface) == 4:
u1 = surface[0]
v1 = surface[1]
w1 = surface[3]
surface = [u1, v1, w1]
# set the surface for grain boundary.
if surface is None:
if lat_type.lower() == 'c':
surface = r_axis
else:
if lat_type.lower() == 'h':
if ratio is None:
c2_a2_ratio = 1
else:
c2_a2_ratio = ratio[0] / ratio[1]
metric = np.array([[1, -0.5, 0], [-0.5, 1, 0], [0, 0, c2_a2_ratio]])
elif lat_type.lower() == 'r':
if ratio is None:
cos_alpha = 0.5
else:
cos_alpha = 1.0 / (ratio[0] / ratio[1] - 2)
metric = np.array([[1, cos_alpha, cos_alpha], [cos_alpha, 1, cos_alpha],
[cos_alpha, cos_alpha, 1]])
elif lat_type.lower() == 't':
if ratio is None:
c2_a2_ratio = 1
else:
c2_a2_ratio = ratio[0] / ratio[1]
metric = np.array([[1, 0, 0], [0, 1, 0], [0, 0, c2_a2_ratio]])
elif lat_type.lower() == 'o':
for i in range(3):
if ratio[i] is None:
ratio[i] = 1
metric = np.array([[1, 0, 0], [0, ratio[1] / ratio[2], 0],
[0, 0, ratio[0] / ratio[2]]])
else:
raise RuntimeError('Lattice type not implemented.')
surface = np.matmul(r_axis, metric)
fractions = [Fraction(x).limit_denominator() for x in surface]
least_mul = reduce(lcm, [f.denominator for f in fractions])
surface = [int(round(x * least_mul)) for x in surface]
if reduce(gcd, surface) != 1:
index = reduce(gcd, surface)
surface = [int(round(x / index)) for x in surface]
if lat_type.lower() == 'h':
# set the value for u,v,w,mu,mv,m,n,d,x
# check the reference for the meaning of these parameters
u, v, w = r_axis
# make sure mu, mv are coprime integers.
if ratio is None:
mu, mv = [1, 1]
if w != 0:
if u != 0 or (v != 0):
raise RuntimeError('For irrational c2/a2, CSL only exist for [0,0,1] '
'or [u,v,0] and m = 0')
else:
mu, mv = ratio
if gcd(mu, mv) != 1:
temp = gcd(mu, mv)
mu = int(round(mu / temp))
mv = int(round(mv / temp))
d = (u ** 2 + v ** 2 - u * v) * mv + w ** 2 * mu
if abs(angle - 180.0) < 1.e0:
m = 0
n = 1
else:
fraction = Fraction(np.tan(angle / 2 / 180.0 * np.pi) /
np.sqrt(float(d) / 3.0 / mu)).limit_denominator()
m = fraction.denominator
n = fraction.numerator
# construct the rotation matrix, check reference for details
r_list = [(u ** 2 * mv - v ** 2 * mv - w ** 2 * mu) * n ** 2 +
2 * w * mu * m * n + 3 * mu * m ** 2,
(2 * v - u) * u * mv * n ** 2 - 4 * w * mu * m * n,
2 * u * w * mu * n ** 2 + 2 * (2 * v - u) * mu * m * n,
(2 * u - v) * v * mv * n ** 2 + 4 * w * mu * m * n,
(v ** 2 * mv - u ** 2 * mv - w ** 2 * mu) * n ** 2 -
2 * w * mu * m * n + 3 * mu * m ** 2,
2 * v * w * mu * n ** 2 - 2 * (2 * u - v) * mu * m * n,
(2 * u - v) * w * mv * n ** 2 - 3 * v * mv * m * n,
(2 * v - u) * w * mv * n ** 2 + 3 * u * mv * m * n,
(w ** 2 * mu - u ** 2 * mv - v ** 2 * mv + u * v * mv) *
n ** 2 + 3 * mu * m ** 2]
m = -1 * m
r_list_inv = [(u ** 2 * mv - v ** 2 * mv - w ** 2 * mu) * n ** 2 +
2 * w * mu * m * n + 3 * mu * m ** 2,
(2 * v - u) * u * mv * n ** 2 - 4 * w * mu * m * n,
2 * u * w * mu * n ** 2 + 2 * (2 * v - u) * mu * m * n,
(2 * u - v) * v * mv * n ** 2 + 4 * w * mu * m * n,
(v ** 2 * mv - u ** 2 * mv - w ** 2 * mu) * n ** 2 -
2 * w * mu * m * n + 3 * mu * m ** 2,
2 * v * w * mu * n ** 2 - 2 * (2 * u - v) * mu * m * n,
(2 * u - v) * w * mv * n ** 2 - 3 * v * mv * m * n,
(2 * v - u) * w * mv * n ** 2 + 3 * u * mv * m * n,
(w ** 2 * mu - u ** 2 * mv - v ** 2 * mv + u * v * mv) *
n ** 2 + 3 * mu * m ** 2]
m = -1 * m
F = 3 * mu * m ** 2 + d * n ** 2
all_list = r_list + r_list_inv + [F]
com_fac = reduce(gcd, all_list)
sigma = F / com_fac
r_matrix = (np.array(r_list) / com_fac / sigma).reshape(3, 3)
elif lat_type.lower() == 'r':
# set the value for u,v,w,mu,mv,m,n,d
# check the reference for the meaning of these parameters
u, v, w = r_axis
# make sure mu, mv are coprime integers.
if ratio is None:
mu, mv = [1, 1]
if u + v + w != 0:
if u != v or u != w:
raise RuntimeError('For irrational ratio_alpha, CSL only exist for [1,1,1] '
'or [u, v, -(u+v)] and m = 0')
else:
mu, mv = ratio
if gcd(mu, mv) != 1:
temp = gcd(mu, mv)
mu = int(round(mu / temp))
mv = int(round(mv / temp))
d = (u ** 2 + v ** 2 + w ** 2) * (mu - 2 * mv) + \
2 * mv * (v * w + w * u + u * v)
if abs(angle - 180.0) < 1.e0:
m = 0
n = 1
else:
fraction = Fraction(np.tan(angle / 2 / 180.0 * np.pi) /
np.sqrt(float(d) / mu)).limit_denominator()
m = fraction.denominator
n = fraction.numerator
# construct the rotation matrix, check reference for details
r_list = [(mu - 2 * mv) * (u ** 2 - v ** 2 - w ** 2) * n ** 2 +
2 * mv * (v - w) * m * n - 2 * mv * v * w * n ** 2 +
mu * m ** 2,
2 * (mv * u * n * (w * n + u * n - m) - (mu - mv) *
m * w * n + (mu - 2 * mv) * u * v * n ** 2),
2 * (mv * u * n * (v * n + u * n + m) + (mu - mv) *
m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * v * n * (w * n + v * n + m) + (mu - mv) *
m * w * n + (mu - 2 * mv) * u * v * n ** 2),
(mu - 2 * mv) * (v ** 2 - w ** 2 - u ** 2) * n ** 2 +
2 * mv * (w - u) * m * n - 2 * mv * u * w * n ** 2 +
mu * m ** 2,
2 * (mv * v * n * (v * n + u * n - m) - (mu - mv) *
m * u * n + (mu - 2 * mv) * w * v * n ** 2),
2 * (mv * w * n * (w * n + v * n - m) - (mu - mv) *
m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * w * n * (w * n + u * n + m) + (mu - mv) *
m * u * n + (mu - 2 * mv) * w * v * n ** 2),
(mu - 2 * mv) * (w ** 2 - u ** 2 - v ** 2) * n ** 2 +
2 * mv * (u - v) * m * n - 2 * mv * u * v * n ** 2 +
mu * m ** 2]
m = -1 * m
r_list_inv = [(mu - 2 * mv) * (u ** 2 - v ** 2 - w ** 2) * n ** 2 +
2 * mv * (v - w) * m * n - 2 * mv * v * w * n ** 2 +
mu * m ** 2,
2 * (mv * u * n * (w * n + u * n - m) - (mu - mv) *
m * w * n + (mu - 2 * mv) * u * v * n ** 2),
2 * (mv * u * n * (v * n + u * n + m) + (mu - mv) *
m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * v * n * (w * n + v * n + m) + (mu - mv) *
m * w * n + (mu - 2 * mv) * u * v * n ** 2),
(mu - 2 * mv) * (v ** 2 - w ** 2 - u ** 2) * n ** 2 +
2 * mv * (w - u) * m * n - 2 * mv * u * w * n ** 2 +
mu * m ** 2,
2 * (mv * v * n * (v * n + u * n - m) - (mu - mv) *
m * u * n + (mu - 2 * mv) * w * v * n ** 2),
2 * (mv * w * n * (w * n + v * n - m) - (mu - mv) *
m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * w * n * (w * n + u * n + m) + (mu - mv) *
m * u * n + (mu - 2 * mv) * w * v * n ** 2),
(mu - 2 * mv) * (w ** 2 - u ** 2 - v ** 2) * n ** 2 +
2 * mv * (u - v) * m * n - 2 * mv * u * v * n ** 2 +
mu * m ** 2]
m = -1 * m
F = mu * m ** 2 + d * n ** 2
all_list = r_list_inv + r_list + [F]
com_fac = reduce(gcd, all_list)
sigma = F / com_fac
r_matrix = (np.array(r_list) / com_fac / sigma).reshape(3, 3)
else:
u, v, w = r_axis
if lat_type.lower() == 'c':
mu = 1
lam = 1
mv = 1
elif lat_type.lower() == 't':
if ratio is None:
mu, mv = [1, 1]
if w != 0:
if u != 0 or (v != 0):
raise RuntimeError('For irrational c2/a2, CSL only exist for [0,0,1] '
'or [u,v,0] and m = 0')
else:
mu, mv = ratio
lam = mv
elif lat_type.lower() == 'o':
if None in ratio:
mu, lam, mv = ratio
non_none = [i for i in ratio if i is not None]
if len(non_none) < 2:
raise RuntimeError('No CSL exist for two irrational numbers')
non1, non2 = non_none
if mu is None:
lam = non1
mv = non2
mu = 1
if w != 0:
if u != 0 or (v != 0):
raise RuntimeError('For irrational c2, CSL only exist for [0,0,1] '
'or [u,v,0] and m = 0')
elif lam is None:
mu = non1
mv = non2
lam = 1
if v != 0:
if u != 0 or (w != 0):
raise RuntimeError('For irrational b2, CSL only exist for [0,1,0] '
'or [u,0,w] and m = 0')
elif mv is None:
mu = non1
lam = non2
mv = 1
if u != 0:
if w != 0 or (v != 0):
raise RuntimeError('For irrational a2, CSL only exist for [1,0,0] '
'or [0,v,w] and m = 0')
else:
mu, lam, mv = ratio
if u == 0 and v == 0:
mu = 1
if u == 0 and w == 0:
lam = 1
if v == 0 and w == 0:
mv = 1
# make sure mu, lambda, mv are coprime integers.
if reduce(gcd, [mu, lam, mv]) != 1:
temp = reduce(gcd, [mu, lam, mv])
mu = int(round(mu / temp))
mv = int(round(mv / temp))
lam = int(round(lam / temp))
d = (mv * u ** 2 + lam * v ** 2) * mv + w ** 2 * mu * mv
if abs(angle - 180.0) < 1.e0:
m = 0
n = 1
else:
fraction = Fraction(np.tan(angle / 2 / 180.0 * np.pi) /
np.sqrt(d / mu / lam)).limit_denominator()
m = fraction.denominator
n = fraction.numerator
r_list = [(u ** 2 * mv * mv - lam * v ** 2 * mv -
w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
2 * lam * (v * u * mv * n ** 2 - w * mu * m * n),
2 * mu * (u * w * mv * n ** 2 + v * lam * m * n),
2 * mv * (u * v * mv * n ** 2 + w * mu * m * n),
(v ** 2 * mv * lam - u ** 2 * mv * mv -
w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
2 * mv * mu * (v * w * n ** 2 - u * m * n),
2 * mv * (u * w * mv * n ** 2 - v * lam * m * n),
2 * lam * mv * (v * w * n ** 2 + u * m * n),
(w ** 2 * mu * mv - u ** 2 * mv * mv -
v ** 2 * mv * lam) * n ** 2 + lam * mu * m ** 2]
m = -1 * m
r_list_inv = [(u ** 2 * mv * mv - lam * v ** 2 * mv -
w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
2 * lam * (v * u * mv * n ** 2 - w * mu * m * n),
2 * mu * (u * w * mv * n ** 2 + v * lam * m * n),
2 * mv * (u * v * mv * n ** 2 + w * mu * m * n),
(v ** 2 * mv * lam - u ** 2 * mv * mv -
w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
2 * mv * mu * (v * w * n ** 2 - u * m * n),
2 * mv * (u * w * mv * n ** 2 - v * lam * m * n),
2 * lam * mv * (v * w * n ** 2 + u * m * n),
(w ** 2 * mu * mv - u ** 2 * mv * mv -
v ** 2 * mv * lam) * n ** 2 + lam * mu * m ** 2]
m = -1 * m
F = mu * lam * m ** 2 + d * n ** 2
all_list = r_list + r_list_inv + [F]
com_fac = reduce(gcd, all_list)
sigma = F / com_fac
r_matrix = (np.array(r_list) / com_fac / sigma).reshape(3, 3)
if (sigma > 1000):
raise RuntimeError('Sigma > 1000 is too large. Are you sure this is what you want? '
'Please check whether this GB exists.')
# transform surface, r_axis, r_matrix in terms of primitive lattice
surface = np.matmul(surface, np.transpose(trans_cry))
fractions = [Fraction(x).limit_denominator() for x in surface]
least_mul = reduce(lcm, [f.denominator for f in fractions])
surface = [int(round(x * least_mul)) for x in surface]
if reduce(gcd, surface) != 1:
index = reduce(gcd, surface)
surface = [int(round(x / index)) for x in surface]
r_axis = np.rint(np.matmul(r_axis, np.linalg.inv(trans_cry))).astype(int)
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
r_matrix = np.dot(np.dot(np.linalg.inv(trans_cry.T), r_matrix), trans_cry.T)
# set one vector of the basis to the rotation axis direction, and
# obtain the corresponding transform matrix
eye = np.eye(3, dtype=int)
for h in range(3):
if abs(r_axis[h]) != 0:
eye[h] = np.array(r_axis)
k = h + 1 if h + 1 < 3 else abs(2 - h)
l = h + 2 if h + 2 < 3 else abs(1 - h)
break
trans = eye.T
new_rot = np.array(r_matrix)
# with the rotation matrix to construct the CSL lattice, check reference for details
fractions = [Fraction(x).limit_denominator() for x in new_rot[:, k]]
least_mul = reduce(lcm, [f.denominator for f in fractions])
scale = np.zeros((3, 3))
scale[h, h] = 1
scale[k, k] = least_mul
scale[l, l] = sigma / least_mul
for i in range(least_mul):
check_int = i * new_rot[:, k] + (sigma / least_mul) * new_rot[:, l]
if all([np.round(x, 5).is_integer() for x in list(check_int)]):
n_final = i
break
try:
n_final
except NameError:
raise RuntimeError('Something is wrong. Check if this GB exists or not')
scale[k, l] = n_final
# each row of mat_csl is the CSL lattice vector
csl_init = np.rint(np.dot(np.dot(r_matrix, trans), scale)).astype(int).T
if abs(r_axis[h]) > 1:
csl_init = GrainBoundaryGenerator.reduce_mat(np.array(csl_init), r_axis[h], r_matrix)
csl = np.rint(Lattice(csl_init).get_niggli_reduced_lattice().matrix).astype(int)
# find the best slab supercell in terms of the conventional cell from the csl lattice,
# which is the transformation matrix
# now trans_cry is the transformation matrix from crystal to cartesian coordinates.
# for cubic, do not need to change.
if lat_type.lower() != 'c':
if lat_type.lower() == 'h':
trans_cry = np.array([[1, 0, 0], [-0.5, np.sqrt(3.0) / 2.0, 0],
[0, 0, np.sqrt(mu / mv)]])
elif lat_type.lower() == 'r':
if ratio is None:
c2_a2_ratio = 1
else:
c2_a2_ratio = 3.0 / (2 - 6 * mv / mu)
trans_cry = np.array([[0.5, np.sqrt(3.0) / 6.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)],
[-0.5, np.sqrt(3.0) / 6.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)],
[0, -1 * np.sqrt(3.0) / 3.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)]])
else:
trans_cry = np.array([[1, 0, 0], [0, np.sqrt(lam / mv), 0], [0, 0, np.sqrt(mu / mv)]])
t1_final = GrainBoundaryGenerator.slab_from_csl(csl, surface, normal, trans_cry, max_search=max_search,
quick_gen=quick_gen)
t2_final = np.array(np.rint(np.dot(t1_final, np.linalg.inv(r_matrix.T)))).astype(int)
return t1_final, t2_final
@staticmethod
def enum_sigma_cubic(cutoff, r_axis):
"""
Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in cubic system.
The algorithm for this code is from the reference Acta Cryst. A40, 108 (1984).
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w):
the rotation axis of the grain boundary, with the format of [u,v,w].
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
Note: the angles are the rotation angles of one grain with respect to
the other grain.
When generating the microstructures of the grain boundary using these angles,
you need to analyze the symmetry of the structure. Different angles may
result in equivalent microstructures.
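Example (an illustrative sketch; the cutoff value is an assumption, while
the sigma 3 / 60.0 degree pair follows the documented [1, 1, 1] twist case):
    sigmas = GrainBoundaryGenerator.enum_sigma_cubic(cutoff=10, r_axis=[1, 1, 1])
    # sigmas should contain key 3 with a 60.0 degree rotation angle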
"""
sigmas = {}
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
# count the number of odds in r_axis
odd_r = len(list(filter(lambda x: x % 2 == 1, r_axis)))
# Compute the max n we need to enumerate.
if odd_r == 3:
a_max = 4
elif odd_r == 0:
a_max = 1
else:
a_max = 2
n_max = int(np.sqrt(cutoff * a_max / sum(np.array(r_axis) ** 2)))
# enumerate all possible n, m to give possible sigmas within the cutoff.
for n_loop in range(1, n_max + 1):
n = n_loop
m_max = int(np.sqrt(cutoff * a_max - n ** 2 * sum(np.array(r_axis) ** 2)))
for m in range(0, m_max + 1):
if gcd(m, n) == 1 or m == 0:
if m == 0:
n = 1
else:
n = n_loop
# construct the quadruple [m, U,V,W], count the number of odds in
# quadruple to determine the parameter a, refer to the reference
quadruple = [m] + [x * n for x in r_axis]
odd_qua = len(list(filter(lambda x: x % 2 == 1, quadruple)))
if odd_qua == 4:
a = 4
elif odd_qua == 2:
a = 2
else:
a = 1
sigma = int(round((m ** 2 + n ** 2 * sum(np.array(r_axis) ** 2)) / a))
if (sigma <= cutoff) and (sigma > 1):
if sigma not in list(sigmas.keys()):
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n * np.sqrt(sum(np.array(r_axis) ** 2)) / m) \
/ np.pi * 180
sigmas[sigma] = [angle]
else:
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n * np.sqrt(sum(np.array(r_axis) ** 2)) / m) \
/ np.pi * 180
if angle not in sigmas[sigma]:
sigmas[sigma].append(angle)
return sigmas
@staticmethod
def enum_sigma_hex(cutoff, r_axis, c2_a2_ratio):
"""
Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in hexagonal system.
The algorithm for this code is from the reference Acta Cryst. A38, 550 (1982).
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w
or four integers, e.g. u, v, t, w):
the rotation axis of the grain boundary.
c2_a2_ratio (list of two integers, e.g. mu, mv):
mu/mv is the square of the hexagonal axial ratio, which is a rational
number. If irrational, set c2_a2_ratio = None.
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
Note: the angles are the rotation angles of one grain with respect to the
other grain.
When generating the microstructure of the grain boundary using these
angles, you need to analyze the symmetry of the structure. Different
angles may result in equivalent microstructures.
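Example (a hypothetical sketch; c2_a2_ratio=[8, 3] corresponds to the ideal
hcp ratio c2/a2 = 8/3 and is an assumption, not from the original docs):
    sigmas = GrainBoundaryGenerator.enum_sigma_hex(
        cutoff=20, r_axis=[0, 0, 0, 1], c2_a2_ratio=[8, 3])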
"""
sigmas = {}
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
# transform four index notation to three index notation
if len(r_axis) == 4:
u1 = r_axis[0]
v1 = r_axis[1]
w1 = r_axis[3]
u = 2 * u1 + v1
v = 2 * v1 + u1
w = w1
else:
u, v, w = r_axis
# make sure mu, mv are coprime integers.
if c2_a2_ratio is None:
mu, mv = [1, 1]
if w != 0:
if u != 0 or (v != 0):
raise RuntimeError('For irrational c2/a2, CSL only exist for [0,0,1] '
'or [u,v,0] and m = 0')
else:
mu, mv = c2_a2_ratio
if gcd(mu, mv) != 1:
temp = gcd(mu, mv)
mu = int(round(mu / temp))
mv = int(round(mv / temp))
# refer to the meaning of d in reference
d = (u ** 2 + v ** 2 - u * v) * mv + w ** 2 * mu
# Compute the max n we need to enumerate.
n_max = int(np.sqrt((cutoff * 12 * mu * mv) / abs(d)))
# Enumerate all possible n, m to give possible sigmas within the cutoff.
for n in range(1, n_max + 1):
if (c2_a2_ratio is None) and w == 0:
m_max = 0
else:
m_max = int(np.sqrt((cutoff * 12 * mu * mv - n ** 2 * d) / (3 * mu)))
for m in range(0, m_max + 1):
if gcd(m, n) == 1 or m == 0:
# construct the rotation matrix, refer to the reference
R_list = [(u ** 2 * mv - v ** 2 * mv - w ** 2 * mu) * n ** 2 +
2 * w * mu * m * n + 3 * mu * m ** 2,
(2 * v - u) * u * mv * n ** 2 - 4 * w * mu * m * n,
2 * u * w * mu * n ** 2 + 2 * (2 * v - u) * mu * m * n,
(2 * u - v) * v * mv * n ** 2 + 4 * w * mu * m * n,
(v ** 2 * mv - u ** 2 * mv - w ** 2 * mu) * n ** 2 -
2 * w * mu * m * n + 3 * mu * m ** 2,
2 * v * w * mu * n ** 2 - 2 * (2 * u - v) * mu * m * n,
(2 * u - v) * w * mv * n ** 2 - 3 * v * mv * m * n,
(2 * v - u) * w * mv * n ** 2 + 3 * u * mv * m * n,
(w ** 2 * mu - u ** 2 * mv - v ** 2 * mv + u * v * mv) *
n ** 2 + 3 * mu * m ** 2]
m = -1 * m
# inverse of the rotation matrix
R_list_inv = [(u ** 2 * mv - v ** 2 * mv - w ** 2 * mu) * n ** 2 +
2 * w * mu * m * n + 3 * mu * m ** 2,
(2 * v - u) * u * mv * n ** 2 - 4 * w * mu * m * n,
2 * u * w * mu * n ** 2 + 2 * (2 * v - u) * mu * m * n,
(2 * u - v) * v * mv * n ** 2 + 4 * w * mu * m * n,
(v ** 2 * mv - u ** 2 * mv - w ** 2 * mu) * n ** 2 -
2 * w * mu * m * n + 3 * mu * m ** 2,
2 * v * w * mu * n ** 2 - 2 * (2 * u - v) * mu * m * n,
(2 * u - v) * w * mv * n ** 2 - 3 * v * mv * m * n,
(2 * v - u) * w * mv * n ** 2 + 3 * u * mv * m * n,
(w ** 2 * mu - u ** 2 * mv - v ** 2 * mv + u * v * mv) *
n ** 2 + 3 * mu * m ** 2]
m = -1 * m
F = 3 * mu * m ** 2 + d * n ** 2
all_list = R_list_inv + R_list + [F]
# Compute the max common factors for the elements of the rotation matrix
# and its inverse.
com_fac = reduce(gcd, all_list)
sigma = int(round((3 * mu * m ** 2 + d * n ** 2) / com_fac))
if (sigma <= cutoff) and (sigma > 1):
if sigma not in list(sigmas.keys()):
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / 3.0 / mu)) \
/ np.pi * 180
sigmas[sigma] = [angle]
else:
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / 3.0 / mu)) \
/ np.pi * 180
if angle not in sigmas[sigma]:
sigmas[sigma].append(angle)
if m_max == 0:
break
return sigmas
@staticmethod
def enum_sigma_rho(cutoff, r_axis, ratio_alpha):
"""
Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in rhombohedral system.
The algorithm for this code is from the reference Acta Cryst. A45, 505 (1989).
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w
or four integers, e.g. u, v, t, w):
the rotation axis of the grain boundary, with the format of [u,v,w]
or Weber indices [u, v, t, w].
ratio_alpha (list of two integers, e.g. mu, mv):
mu/mv is the ratio (1+2*cos(alpha))/cos(alpha), which is a rational number.
If irrational, set ratio_alpha = None.
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
Note: the angles are the rotation angles of one grain with respect to the
other grain.
When generating the microstructure of the grain boundary using these
angles, you need to analyze the symmetry of the structure. Different
angles may result in equivalent microstructures.
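Example (a hypothetical sketch; both the axis and the ratio_alpha values
are assumptions chosen only for illustration):
    sigmas = GrainBoundaryGenerator.enum_sigma_rho(
        cutoff=20, r_axis=[0, 0, 0, 1], ratio_alpha=[10, 3])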
"""
sigmas = {}
# transform four index notation to three index notation
if len(r_axis) == 4:
u1 = r_axis[0]
v1 = r_axis[1]
w1 = r_axis[3]
u = 2 * u1 + v1 + w1
v = v1 + w1 - u1
w = w1 - 2 * v1 - u1
r_axis = [u, v, w]
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
u, v, w = r_axis
# make sure mu, mv are coprime integers.
if ratio_alpha is None:
mu, mv = [1, 1]
if u + v + w != 0:
if u != v or u != w:
raise RuntimeError('For irrational ratio_alpha, CSL only exist for [1,1,1] '
'or [u, v, -(u+v)] and m = 0')
else:
mu, mv = ratio_alpha
if gcd(mu, mv) != 1:
temp = gcd(mu, mv)
mu = int(round(mu / temp))
mv = int(round(mv / temp))
# refer to the meaning of d in reference
d = (u ** 2 + v ** 2 + w ** 2) * (mu - 2 * mv) + \
2 * mv * (v * w + w * u + u * v)
# Compute the max n we need to enumerate.
n_max = int(np.sqrt((cutoff * abs(4 * mu * (mu - 3 * mv))) / abs(d)))
# Enumerate all possible n, m to give possible sigmas within the cutoff.
for n in range(1, n_max + 1):
if ratio_alpha is None and u + v + w == 0:
m_max = 0
else:
m_max = int(np.sqrt((cutoff * abs(4 * mu * (mu - 3 * mv)) - n ** 2 * d) / (mu)))
for m in range(0, m_max + 1):
if gcd(m, n) == 1 or m == 0:
# construct the rotation matrix, refer to the reference
R_list = [(mu - 2 * mv) * (u ** 2 - v ** 2 - w ** 2) * n ** 2 +
2 * mv * (v - w) * m * n - 2 * mv * v * w * n ** 2 +
mu * m ** 2,
2 * (mv * u * n * (w * n + u * n - m) - (mu - mv) *
m * w * n + (mu - 2 * mv) * u * v * n ** 2),
2 * (mv * u * n * (v * n + u * n + m) + (mu - mv) *
m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * v * n * (w * n + v * n + m) + (mu - mv) *
m * w * n + (mu - 2 * mv) * u * v * n ** 2),
(mu - 2 * mv) * (v ** 2 - w ** 2 - u ** 2) * n ** 2 +
2 * mv * (w - u) * m * n - 2 * mv * u * w * n ** 2 +
mu * m ** 2,
2 * (mv * v * n * (v * n + u * n - m) - (mu - mv) *
m * u * n + (mu - 2 * mv) * w * v * n ** 2),
2 * (mv * w * n * (w * n + v * n - m) - (mu - mv) *
m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * w * n * (w * n + u * n + m) + (mu - mv) *
m * u * n + (mu - 2 * mv) * w * v * n ** 2),
(mu - 2 * mv) * (w ** 2 - u ** 2 - v ** 2) * n ** 2 +
2 * mv * (u - v) * m * n - 2 * mv * u * v * n ** 2 +
mu * m ** 2]
m = -1 * m
# inverse of the rotation matrix
R_list_inv = [(mu - 2 * mv) * (u ** 2 - v ** 2 - w ** 2) * n ** 2 +
2 * mv * (v - w) * m * n - 2 * mv * v * w * n ** 2 +
mu * m ** 2,
2 * (mv * u * n * (w * n + u * n - m) - (mu - mv) *
m * w * n + (mu - 2 * mv) * u * v * n ** 2),
2 * (mv * u * n * (v * n + u * n + m) + (mu - mv) *
m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * v * n * (w * n + v * n + m) + (mu - mv) *
m * w * n + (mu - 2 * mv) * u * v * n ** 2),
(mu - 2 * mv) * (v ** 2 - w ** 2 - u ** 2) * n ** 2 +
2 * mv * (w - u) * m * n - 2 * mv * u * w * n ** 2 +
mu * m ** 2,
2 * (mv * v * n * (v * n + u * n - m) - (mu - mv) *
m * u * n + (mu - 2 * mv) * w * v * n ** 2),
2 * (mv * w * n * (w * n + v * n - m) - (mu - mv) *
m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * w * n * (w * n + u * n + m) + (mu - mv) *
m * u * n + (mu - 2 * mv) * w * v * n ** 2),
(mu - 2 * mv) * (w ** 2 - u ** 2 - v ** 2) * n ** 2 +
2 * mv * (u - v) * m * n - 2 * mv * u * v * n ** 2 +
mu * m ** 2]
m = -1 * m
F = mu * m ** 2 + d * n ** 2
all_list = R_list_inv + R_list + [F]
# Compute the max common factors for the elements of the rotation matrix
# and its inverse.
com_fac = reduce(gcd, all_list)
sigma = int(round(abs(F / com_fac)))
if (sigma <= cutoff) and (sigma > 1):
if sigma not in list(sigmas.keys()):
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu)) \
/ np.pi * 180
sigmas[sigma] = [angle]
else:
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu)) \
/ np.pi * 180.0
if angle not in sigmas[sigma]:
sigmas[sigma].append(angle)
if m_max == 0:
break
return sigmas
@staticmethod
def enum_sigma_tet(cutoff, r_axis, c2_a2_ratio):
"""
Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in tetragonal system.
The algorithm for this code is from the reference Acta Cryst. B46, 117 (1990).
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w):
the rotation axis of the grain boundary, with the format of [u,v,w].
c2_a2_ratio (list of two integers, e.g. mu, mv):
mu/mv is the square of the tetragonal axial ratio, which is a rational number.
If irrational, set c2_a2_ratio = None.
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
Note: the angles are the rotation angles of one grain with respect to the
other grain.
When generating the microstructure of the grain boundary using these
angles, you need to analyze the symmetry of the structure. Different
angles may result in equivalent microstructures.
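Example (a hypothetical sketch; c2_a2_ratio=[2, 1] stands for c2/a2 = 2 and
is an assumption):
    sigmas = GrainBoundaryGenerator.enum_sigma_tet(
        cutoff=20, r_axis=[1, 0, 0], c2_a2_ratio=[2, 1])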
"""
sigmas = {}
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
u, v, w = r_axis
# make sure mu, mv are coprime integers.
if c2_a2_ratio is None:
mu, mv = [1, 1]
if w != 0:
if u != 0 or (v != 0):
raise RuntimeError('For irrational c2/a2, CSL only exist for [0,0,1] '
'or [u,v,0] and m = 0')
else:
mu, mv = c2_a2_ratio
if gcd(mu, mv) != 1:
temp = gcd(mu, mv)
mu = int(round(mu / temp))
mv = int(round(mv / temp))
# refer to the meaning of d in reference
d = (u ** 2 + v ** 2) * mv + w ** 2 * mu
# Compute the max n we need to enumerate.
n_max = int(np.sqrt((cutoff * 4 * mu * mv) / d))
# Enumerate all possible n, m to give possible sigmas within the cutoff.
for n in range(1, n_max + 1):
if c2_a2_ratio is None and w == 0:
m_max = 0
else:
m_max = int(np.sqrt((cutoff * 4 * mu * mv - n ** 2 * d) / mu))
for m in range(0, m_max + 1):
if gcd(m, n) == 1 or m == 0:
# construct the rotation matrix, refer to the reference
R_list = [(u ** 2 * mv - v ** 2 * mv - w ** 2 * mu) * n ** 2 +
mu * m ** 2,
2 * v * u * mv * n ** 2 - 2 * w * mu * m * n,
2 * u * w * mu * n ** 2 + 2 * v * mu * m * n,
2 * u * v * mv * n ** 2 + 2 * w * mu * m * n,
(v ** 2 * mv - u ** 2 * mv - w ** 2 * mu) * n ** 2 +
mu * m ** 2,
2 * v * w * mu * n ** 2 - 2 * u * mu * m * n,
2 * u * w * mv * n ** 2 - 2 * v * mv * m * n,
2 * v * w * mv * n ** 2 + 2 * u * mv * m * n,
(w ** 2 * mu - u ** 2 * mv - v ** 2 * mv) * n ** 2 +
mu * m ** 2]
m = -1 * m
# inverse of rotation matrix
R_list_inv = [(u ** 2 * mv - v ** 2 * mv - w ** 2 * mu) * n ** 2 +
mu * m ** 2,
2 * v * u * mv * n ** 2 - 2 * w * mu * m * n,
2 * u * w * mu * n ** 2 + 2 * v * mu * m * n,
2 * u * v * mv * n ** 2 + 2 * w * mu * m * n,
(v ** 2 * mv - u ** 2 * mv - w ** 2 * mu) * n ** 2 +
mu * m ** 2,
2 * v * w * mu * n ** 2 - 2 * u * mu * m * n,
2 * u * w * mv * n ** 2 - 2 * v * mv * m * n,
2 * v * w * mv * n ** 2 + 2 * u * mv * m * n,
(w ** 2 * mu - u ** 2 * mv - v ** 2 * mv) * n ** 2 +
mu * m ** 2]
m = -1 * m
F = mu * m ** 2 + d * n ** 2
all_list = R_list + R_list_inv + [F]
# Compute the max common factors for the elements of the rotation matrix
# and its inverse.
com_fac = reduce(gcd, all_list)
sigma = int(round((mu * m ** 2 + d * n ** 2) / com_fac))
if (sigma <= cutoff) and (sigma > 1):
if sigma not in list(sigmas.keys()):
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu)) \
/ np.pi * 180
sigmas[sigma] = [angle]
else:
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu)) \
/ np.pi * 180
if angle not in sigmas[sigma]:
sigmas[sigma].append(angle)
if m_max == 0:
break
return sigmas
@staticmethod
def enum_sigma_ort(cutoff, r_axis, c2_b2_a2_ratio):
"""
Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in orthorhombic system.
The algorithm for this code is from the reference Scripta Metallurgica 27, 291 (1992).
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w):
the rotation axis of the grain boundary, with the format of [u,v,w].
c2_b2_a2_ratio (list of three integers, e.g. mu, lam, mv):
mu:lam:mv is the square of the orthorhombic axial ratio, given as rational
numbers. If irrational for one axis, set it to None.
e.g. mu:lam:mv = c2,None,a2, means b2 is irrational.
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
Note: the angles are the rotation angles of one grain with respect to the
other grain.
When generating the microstructure of the grain boundary using these
angles, you need to analyze the symmetry of the structure. Different
angles may result in equivalent microstructures.
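Example (a hypothetical sketch; the axial ratio values are assumptions):
    sigmas = GrainBoundaryGenerator.enum_sigma_ort(
        cutoff=20, r_axis=[1, 0, 0], c2_b2_a2_ratio=[3, 2, 1])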
"""
sigmas = {}
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
u, v, w = r_axis
# make sure mu, lambda, mv are coprime integers.
if None in c2_b2_a2_ratio:
mu, lam, mv = c2_b2_a2_ratio
non_none = [i for i in c2_b2_a2_ratio if i is not None]
if len(non_none) < 2:
raise RuntimeError('No CSL exist for two irrational numbers')
non1, non2 = non_none
if reduce(gcd, non_none) != 1:
temp = reduce(gcd, non_none)
non1 = int(round(non1 / temp))
non2 = int(round(non2 / temp))
if mu is None:
lam = non1
mv = non2
mu = 1
if w != 0:
if u != 0 or (v != 0):
raise RuntimeError('For irrational c2, CSL only exist for [0,0,1] '
'or [u,v,0] and m = 0')
elif lam is None:
mu = non1
mv = non2
lam = 1
if v != 0:
if u != 0 or (w != 0):
raise RuntimeError('For irrational b2, CSL only exist for [0,1,0] '
'or [u,0,w] and m = 0')
elif mv is None:
mu = non1
lam = non2
mv = 1
if u != 0:
if w != 0 or (v != 0):
raise RuntimeError('For irrational a2, CSL only exist for [1,0,0] '
'or [0,v,w] and m = 0')
else:
mu, lam, mv = c2_b2_a2_ratio
if reduce(gcd, c2_b2_a2_ratio) != 1:
temp = reduce(gcd, c2_b2_a2_ratio)
mu = int(round(mu / temp))
mv = int(round(mv / temp))
lam = int(round(lam / temp))
if u == 0 and v == 0:
mu = 1
if u == 0 and w == 0:
lam = 1
if v == 0 and w == 0:
mv = 1
# refer to the meaning of d in reference
d = (mv * u ** 2 + lam * v ** 2) * mv + w ** 2 * mu * mv
# Compute the max n we need to enumerate.
n_max = int(np.sqrt((cutoff * 4 * mu * mv * mv * lam) / d))
# Enumerate all possible n, m to give possible sigmas within the cutoff.
for n in range(1, n_max + 1):
mu_temp, lam_temp, mv_temp = c2_b2_a2_ratio
if (mu_temp is None and w == 0) or (lam_temp is None and v == 0) \
or (mv_temp is None and u == 0):
m_max = 0
else:
m_max = int(np.sqrt((cutoff * 4 * mu * mv * lam * mv -
n ** 2 * d) / mu / lam))
for m in range(0, m_max + 1):
if gcd(m, n) == 1 or m == 0:
# construct the rotation matrix, refer to the reference
R_list = [(u ** 2 * mv * mv - lam * v ** 2 * mv -
w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
2 * lam * (v * u * mv * n ** 2 - w * mu * m * n),
2 * mu * (u * w * mv * n ** 2 + v * lam * m * n),
2 * mv * (u * v * mv * n ** 2 + w * mu * m * n),
(v ** 2 * mv * lam - u ** 2 * mv * mv -
w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
2 * mv * mu * (v * w * n ** 2 - u * m * n),
2 * mv * (u * w * mv * n ** 2 - v * lam * m * n),
2 * lam * mv * (v * w * n ** 2 + u * m * n),
(w ** 2 * mu * mv - u ** 2 * mv * mv -
v ** 2 * mv * lam) * n ** 2 + lam * mu * m ** 2]
m = -1 * m
# inverse of rotation matrix
R_list_inv = [(u ** 2 * mv * mv - lam * v ** 2 * mv -
w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
2 * lam * (v * u * mv * n ** 2 - w * mu * m * n),
2 * mu * (u * w * mv * n ** 2 + v * lam * m * n),
2 * mv * (u * v * mv * n ** 2 + w * mu * m * n),
(v ** 2 * mv * lam - u ** 2 * mv * mv -
w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
2 * mv * mu * (v * w * n ** 2 - u * m * n),
2 * mv * (u * w * mv * n ** 2 - v * lam * m * n),
2 * lam * mv * (v * w * n ** 2 + u * m * n),
(w ** 2 * mu * mv - u ** 2 * mv * mv -
v ** 2 * mv * lam) * n ** 2 + lam * mu * m ** 2]
m = -1 * m
F = mu * lam * m ** 2 + d * n ** 2
all_list = R_list + R_list_inv + [F]
# Compute the max common factors for the elements of the rotation matrix
# and its inverse.
com_fac = reduce(gcd, all_list)
sigma = int(round((mu * lam * m ** 2 + d * n ** 2) / com_fac))
if (sigma <= cutoff) and (sigma > 1):
if sigma not in list(sigmas.keys()):
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu / lam)) \
/ np.pi * 180
sigmas[sigma] = [angle]
else:
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu / lam)) \
/ np.pi * 180
if angle not in sigmas[sigma]:
sigmas[sigma].append(angle)
if m_max == 0:
break
return sigmas
@staticmethod
def enum_possible_plane_cubic(plane_cutoff, r_axis, r_angle):
"""
Find all possible plane combinations for GBs given a rotation axis and angle for
the cubic system, and classify them into different categories, including 'Twist',
'Symmetric tilt', 'Normal tilt', and 'Mixed' GBs.
Args:
plane_cutoff (integer): the cutoff of plane miller index.
r_axis (list of three integers, e.g. u, v, w):
the rotation axis of the grain boundary, with the format of [u,v,w].
r_angle (float): rotation angle of the GBs.
Returns:
all_combinations (dict):
dictionary with keys as GB type, e.g. 'Twist','Symmetric tilt',etc.
and values as the combinations of the two plane miller indices
(GB plane and joining plane).
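Example (an illustrative sketch; the angle follows the documented sigma 3
[1, 1, 1] case, and the plane cutoff is an assumption):
    combos = GrainBoundaryGenerator.enum_possible_plane_cubic(
        plane_cutoff=2, r_axis=[1, 1, 1], r_angle=60.0)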
"""
all_combinations = {}
all_combinations['Symmetric tilt'] = []
all_combinations['Twist'] = []
all_combinations['Normal tilt'] = []
all_combinations['Mixed'] = []
sym_plane = symm_group_cubic([[1, 0, 0], [1, 1, 0]])
j = np.arange(0, plane_cutoff + 1)
combination = []
for i in itertools.product(j, repeat=3):
if sum(abs(np.array(i))) != 0:
combination.append(list(i))
if len(np.nonzero(i)[0]) == 3:
for i1 in range(3):
new_i = list(i).copy()
new_i[i1] = -1 * new_i[i1]
combination.append(new_i)
elif len(np.nonzero(i)[0]) == 2:
new_i = list(i).copy()
new_i[np.nonzero(i)[0][0]] = -1 * new_i[np.nonzero(i)[0][0]]
combination.append(new_i)
miller = np.array(combination)
miller = miller[np.argsort(np.linalg.norm(miller, axis=1))]
for i, val in enumerate(miller):
if reduce(gcd, val) == 1:
matrix = GrainBoundaryGenerator.get_trans_mat(r_axis, r_angle, surface=val, quick_gen=True)
vec = np.cross(matrix[1][0], matrix[1][1])
miller2 = GrainBoundaryGenerator.vec_to_surface(vec)
if np.all(np.abs(np.array(miller2)) <= plane_cutoff):
cos_1 = abs(np.dot(val, r_axis) / np.linalg.norm(val) / np.linalg.norm(r_axis))
if 1 - cos_1 < 1.e-5:
all_combinations['Twist'].append([list(val), miller2])
elif cos_1 < 1.e-8:
sym_tilt = False
if np.sum(np.abs(val)) == np.sum(np.abs(miller2)):
ave = (np.array(val) + np.array(miller2)) / 2
ave1 = (np.array(val) - np.array(miller2)) / 2
for plane in sym_plane:
cos_2 = abs(np.dot(ave, plane) / np.linalg.norm(ave) / np.linalg.norm(plane))
cos_3 = abs(np.dot(ave1, plane) / np.linalg.norm(ave1) / np.linalg.norm(plane))
if 1 - cos_2 < 1.e-5 or 1 - cos_3 < 1.e-5:
all_combinations['Symmetric tilt'].append([list(val), miller2])
sym_tilt = True
break
if not sym_tilt:
all_combinations['Normal tilt'].append([list(val), miller2])
else:
all_combinations['Mixed'].append([list(val), miller2])
return all_combinations
@staticmethod
def get_rotation_angle_from_sigma(sigma, r_axis, lat_type='C', ratio=None):
"""
Find all possible rotation angles for the given sigma value.
Args:
sigma (integer):
sigma value provided
r_axis (list of three integers, e.g. u, v, w
or four integers, e.g. u, v, t, w for hex/rho system only):
the rotation axis of the grain boundary.
lat_type (one character):
'c' or 'C': cubic system
't' or 'T': tetragonal system
'o' or 'O': orthorhombic system
'h' or 'H': hexagonal system
'r' or 'R': rhombohedral system
default to cubic system
ratio (list of integers):
lattice axial ratio.
For cubic system, ratio is not needed.
For tetragonal system, ratio = [mu, mv], list of two integers,
that is, mu/mv = c2/a2. If it is irrational, set it to none.
For orthorhombic system, ratio = [mu, lam, mv], list of three integers,
that is, mu:lam:mv = c2:b2:a2. If irrational for one axis, set it to None.
e.g. mu:lam:mv = c2,None,a2, means b2 is irrational.
For rhombohedral system, ratio = [mu, mv], list of two integers,
that is, mu/mv is the ratio of (1+2*cos(alpha))/cos(alpha).
If irrational, set it to None.
For hexagonal system, ratio = [mu, mv], list of two integers,
that is, mu/mv = c2/a2. If it is irrational, set it to none.
Returns:
rotation_angles corresponding to the provided sigma value.
If the sigma value is not valid, return the rotation angles corresponding
to the largest valid sigma value smaller than the one provided.
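Example (an illustrative sketch; sigma 3 about [1, 1, 1] in the cubic
system should include the documented 60.0 degree angle):
    angles = GrainBoundaryGenerator.get_rotation_angle_from_sigma(
        sigma=3, r_axis=[1, 1, 1], lat_type='c')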
"""
if lat_type.lower() == 'c':
logger.info('Make sure this is for cubic system')
sigma_dict = GrainBoundaryGenerator.enum_sigma_cubic(cutoff=sigma, r_axis=r_axis)
elif lat_type.lower() == 't':
logger.info('Make sure this is for tetragonal system')
if ratio is None:
logger.info('Make sure this is for irrational c2/a2 ratio')
elif len(ratio) != 2:
raise RuntimeError('Tetragonal system needs correct c2/a2 ratio')
sigma_dict = GrainBoundaryGenerator.enum_sigma_tet(cutoff=sigma, r_axis=r_axis, c2_a2_ratio=ratio)
elif lat_type.lower() == 'o':
logger.info('Make sure this is for orthorhombic system')
if len(ratio) != 3:
raise RuntimeError('Orthorhombic system needs correct c2:b2:a2 ratio')
sigma_dict = GrainBoundaryGenerator.enum_sigma_ort(cutoff=sigma, r_axis=r_axis, c2_b2_a2_ratio=ratio)
elif lat_type.lower() == 'h':
logger.info('Make sure this is for hexagonal system')
if ratio is None:
logger.info('Make sure this is for irrational c2/a2 ratio')
elif len(ratio) != 2:
raise RuntimeError('Hexagonal system needs correct c2/a2 ratio')
sigma_dict = GrainBoundaryGenerator.enum_sigma_hex(cutoff=sigma, r_axis=r_axis, c2_a2_ratio=ratio)
elif lat_type.lower() == 'r':
logger.info('Make sure this is for rhombohedral system')
if ratio is None:
logger.info('Make sure this is for irrational (1+2*cos(alpha))/cos(alpha) ratio')
elif len(ratio) != 2:
raise RuntimeError('Rhombohedral system needs correct '
'(1+2*cos(alpha))/cos(alpha) ratio')
sigma_dict = GrainBoundaryGenerator.enum_sigma_rho(cutoff=sigma, r_axis=r_axis, ratio_alpha=ratio)
else:
raise RuntimeError('Lattice type not implemented')
sigmas = list(sigma_dict.keys())
if not sigmas:
raise RuntimeError('This is a wrong sigma value, and no sigma exists smaller than this value.')
if sigma in sigmas:
rotation_angles = sigma_dict[sigma]
else:
sigmas.sort()
warnings.warn("This is not the possible sigma value according to the rotation axis!"
"The nearest neighbor sigma and its corresponding angle are returned")
rotation_angles = sigma_dict[sigmas[-1]]
rotation_angles.sort()
return rotation_angles
@staticmethod
def slab_from_csl(csl, surface, normal, trans_cry, max_search=20, quick_gen=False):
"""
Find the best corresponding slab lattice by linear combinations of the csl
lattice vectors. That is, the area spanned by the a, b vectors (within the
surface plane) is the smallest; the c vector first has the shortest length
perpendicular to the surface [h,k,l] and second has the shortest length itself.
Args:
csl (3 by 3 integer array):
input csl lattice.
surface (list of three integers, e.g. h, k, l):
the miller index of the surface, with the format of [h,k,l]
normal (bool):
whether the c vector needs to be perpendicular to the surface.
trans_cry (3 by 3 array):
transform matrix from crystal system to orthogonal system
max_search (int): max search range for the GB lattice vectors that give the smallest GB
lattice. If normal is true, also the max search range for a GB c vector perpendicular
to the plane.
quick_gen (bool): whether to quickly generate a supercell; the smallest cell is
not searched for if set to True.
Returns:
t_matrix: a slab lattice (3 by 3 integer array).
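Example (a hypothetical sketch; an identity CSL with a [0, 0, 1] surface is
an assumption used purely for illustration):
    t_matrix = GrainBoundaryGenerator.slab_from_csl(
        np.eye(3, dtype=int), [0, 0, 1], normal=False, trans_cry=np.eye(3))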
"""
# set the transform matrix in real space
trans = trans_cry
# transform matrix in reciprocal space
ctrans = np.linalg.inv(trans.T)
t_matrix = csl.copy()
# vectors constructed from csl that lie in the surface plane
ab_vector = []
# obtain the miller index of surface in terms of csl.
miller = np.matmul(surface, csl.T)
if reduce(gcd, miller) != 1:
miller = [int(round(x / reduce(gcd, miller))) for x in miller]
miller_nonzero = []
# quickly generate a supercell; the normal option does not work in this mode
if quick_gen:
scale_factor = []
eye = np.eye(3, dtype=np.int)
for i, j in enumerate(miller):
if j == 0:
scale_factor.append(eye[i])
else:
miller_nonzero.append(i)
if len(scale_factor) < 2:
index_len = len(miller_nonzero)
for i in range(index_len):
for j in range(i + 1, index_len):
lcm_miller = lcm(miller[miller_nonzero[i]], miller[miller_nonzero[j]])
l = [0, 0, 0]
l[miller_nonzero[i]] = -int(round(lcm_miller / miller[miller_nonzero[i]]))
l[miller_nonzero[j]] = int(round(lcm_miller / miller[miller_nonzero[j]]))
scale_factor.append(l)
if len(scale_factor) == 2:
break
t_matrix[0] = np.array(np.dot(scale_factor[0], csl))
t_matrix[1] = np.array(np.dot(scale_factor[1], csl))
t_matrix[2] = csl[miller_nonzero[0]]
if abs(np.linalg.det(t_matrix)) > 1000:
warnings.warn('Too large matrix. Suggest to use quick_gen=False')
return t_matrix
for i, j in enumerate(miller):
if j == 0:
ab_vector.append(csl[i])
else:
c_index = i
miller_nonzero.append(j)
if len(miller_nonzero) > 1:
t_matrix[2] = csl[c_index]
index_len = len(miller_nonzero)
lcm_miller = []
for i in range(index_len):
for j in range(i + 1, index_len):
com_gcd = gcd(miller_nonzero[i], miller_nonzero[j])
mil1 = int(round(miller_nonzero[i] / com_gcd))
mil2 = int(round(miller_nonzero[j] / com_gcd))
lcm_miller.append(max(abs(mil1), abs(mil2)))
lcm_sorted = sorted(lcm_miller)
if index_len == 2:
max_j = lcm_sorted[0]
else:
max_j = lcm_sorted[1]
else:
if not normal:
t_matrix[0] = ab_vector[0]
t_matrix[1] = ab_vector[1]
t_matrix[2] = csl[c_index]
return t_matrix
else:
max_j = abs(miller_nonzero[0])
if max_j > max_search:
max_j = max_search
# area of a, b vectors
area = None
# length of c vector
c_norm = np.linalg.norm(np.matmul(t_matrix[2], trans))
# c vector length along the direction perpendicular to surface
c_length = np.abs(np.dot(t_matrix[2], surface))
# check if the initial c vector is perpendicular to the surface
if normal:
c_cross = np.cross(np.matmul(t_matrix[2], trans), np.matmul(surface, ctrans))
if np.linalg.norm(c_cross) < 1.e-8:
normal_init = True
else:
normal_init = False
j = np.arange(0, max_j + 1)
combination = []
for i in itertools.product(j, repeat=3):
if sum(abs(np.array(i))) != 0:
combination.append(list(i))
if len(np.nonzero(i)[0]) == 3:
for i1 in range(3):
new_i = list(i).copy()
new_i[i1] = -1 * new_i[i1]
combination.append(new_i)
elif len(np.nonzero(i)[0]) == 2:
new_i = list(i).copy()
new_i[np.nonzero(i)[0][0]] = -1 * new_i[np.nonzero(i)[0][0]]
combination.append(new_i)
for i in combination:
if reduce(gcd, i) == 1:
temp = np.dot(np.array(i), csl)
if abs(np.dot(temp, surface) - 0) < 1.e-8:
ab_vector.append(temp)
else:
# c vector length along the direction perpendicular to surface
c_len_temp = np.abs(np.dot(temp, surface))
# c vector length itself
c_norm_temp = np.linalg.norm(np.matmul(temp, trans))
if normal:
c_cross = np.cross(np.matmul(temp, trans), np.matmul(surface, ctrans))
if np.linalg.norm(c_cross) < 1.e-8:
if normal_init:
if c_norm_temp < c_norm:
t_matrix[2] = temp
c_norm = c_norm_temp
else:
c_norm = c_norm_temp
normal_init = True
t_matrix[2] = temp
else:
if c_len_temp < c_length or \
(abs(c_len_temp - c_length) < 1.e-8 and c_norm_temp < c_norm):
t_matrix[2] = temp
c_norm = c_norm_temp
c_length = c_len_temp
if normal and (not normal_init):
logger.info('Did not find the perpendicular c vector, increase max_j')
while (not normal_init):
if max_j == max_search:
warnings.warn('Cannot find the perpendicular c vector, please increase max_search')
break
max_j = 3 * max_j
if max_j > max_search:
max_j = max_search
j = np.arange(0, max_j + 1)
combination = []
for i in itertools.product(j, repeat=3):
if sum(abs(np.array(i))) != 0:
combination.append(list(i))
if len(np.nonzero(i)[0]) == 3:
for i1 in range(3):
new_i = list(i).copy()
new_i[i1] = -1 * new_i[i1]
combination.append(new_i)
elif len(np.nonzero(i)[0]) == 2:
new_i = list(i).copy()
new_i[np.nonzero(i)[0][0]] = -1 * new_i[np.nonzero(i)[0][0]]
combination.append(new_i)
for i in combination:
if reduce(gcd, i) == 1:
temp = np.dot(np.array(i), csl)
if abs(np.dot(temp, surface) - 0) > 1.e-8:
c_cross = np.cross(np.matmul(temp, trans), np.matmul(surface, ctrans))
if np.linalg.norm(c_cross) < 1.e-8:
# c vector length itself
c_norm_temp = np.linalg.norm(np.matmul(temp, trans))
if normal_init:
if c_norm_temp < c_norm:
t_matrix[2] = temp
c_norm = c_norm_temp
else:
c_norm = c_norm_temp
normal_init = True
t_matrix[2] = temp
if normal_init:
logger.info('Found perpendicular c vector')
# find the best a, b vectors with their formed area smallest and average norm of a,b smallest.
for i in itertools.combinations(ab_vector, 2):
area_temp = np.linalg.norm(np.cross(np.matmul(i[0], trans),
np.matmul(i[1], trans)))
if abs(area_temp - 0) > 1.e-8:
ab_norm_temp = np.linalg.norm(np.matmul(i[0], trans)) + \
np.linalg.norm(np.matmul(i[1], trans))
if area is None:
area = area_temp
ab_norm = ab_norm_temp
t_matrix[0] = i[0]
t_matrix[1] = i[1]
elif area_temp < area:
t_matrix[0] = i[0]
t_matrix[1] = i[1]
area = area_temp
ab_norm = ab_norm_temp
elif abs(area - area_temp) < 1.e-8 and ab_norm_temp < ab_norm:
t_matrix[0] = i[0]
t_matrix[1] = i[1]
area = area_temp
ab_norm = ab_norm_temp
# make sure we have a right-handed crystallographic system (positive determinant)
if np.linalg.det(np.matmul(t_matrix, trans)) < 0:
t_matrix *= -1
if normal and abs(np.linalg.det(t_matrix)) > 1000:
warnings.warn('Too large matrix. Suggest to use normal=False')
return t_matrix
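# Illustrative call (a sketch with assumed inputs, not from the original
# source): for an identity CSL and a [0, 0, 1] surface with no
# perpendicular-c requirement, the early-return branch hands back the CSL
# vectors themselves as the slab lattice:
# >>> csl = np.eye(3, dtype=int)
# >>> t = GrainBoundaryGenerator.slab_from_csl(csl, [0, 0, 1], False, np.eye(3))
# >>> int(round(np.linalg.det(t)))
# 1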
@staticmethod
def reduce_mat(mat, mag, r_matrix):
"""
Reduce integer array mat's determinant mag times by linear combination
of its row vectors, so that the new array after rotation (r_matrix) is
still an integer array
Args:
mat (3 by 3 array): input matrix
mag (integer): reduce times for the determinant
r_matrix (3 by 3 array): rotation matrix
Return:
the reduced integer array
"""
max_j = abs(int(round(np.linalg.det(mat) / mag)))
reduced = False
for h in range(3):
k = h + 1 if h + 1 < 3 else abs(2 - h)
l = h + 2 if h + 2 < 3 else abs(1 - h)
j = np.arange(-max_j, max_j + 1)
for j1, j2 in itertools.product(j, repeat=2):
temp = mat[h] + j1 * mat[k] + j2 * mat[l]
if all([np.round(x, 5).is_integer() for x in list(temp / mag)]):
mat_copy = mat.copy()
mat_copy[h] = np.array([int(round(ele / mag)) for ele in temp])
new_mat = np.dot(mat_copy, np.linalg.inv(r_matrix.T))
if all([np.round(x, 5).is_integer() for x in list(np.ravel(new_mat))]):
reduced = True
mat[h] = np.array([int(round(ele / mag)) for ele in temp])
break
if reduced:
break
if not reduced:
warnings.warn("Matrix reduction not performed, may lead to non-primitive gb cell.")
return mat
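# Minimal illustration (assumed inputs, not taken from the original
# tests): with an identity rotation, reducing 2 * I by mag=2 rewrites one
# row so the determinant drops from 8 to 4, i.e. by a factor of mag:
# >>> m = GrainBoundaryGenerator.reduce_mat(2 * np.eye(3, dtype=int), 2, np.eye(3))
# >>> int(round(np.linalg.det(m)))
# 4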
@staticmethod
def vec_to_surface(vec):
"""
Transform a float vector to a surface miller index with integers.
Args:
vec (1 by 3 array float vector): input float vector
Return:
the surface miller index of the input vector.
"""
miller = [None] * 3
index = []
for i, value in enumerate(vec):
if abs(value) < 1.e-8:
miller[i] = 0
else:
index.append(i)
if len(index) == 1:
miller[index[0]] = 1
else:
min_index = np.argmin([i for i in vec if i != 0])
true_index = index[min_index]
index.pop(min_index)
frac = []
for i, value in enumerate(index):
frac.append(Fraction(vec[value] / vec[true_index]).limit_denominator(100))
if len(index) == 1:
miller[true_index] = frac[0].denominator
miller[index[0]] = frac[0].numerator
else:
com_lcm = lcm(frac[0].denominator, frac[1].denominator)
miller[true_index] = com_lcm
miller[index[0]] = frac[0].numerator * int(round((com_lcm / frac[0].denominator)))
miller[index[1]] = frac[1].numerator * int(round((com_lcm / frac[1].denominator)))
return miller
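# Worked example (values checked by hand against the branches above): for
# (0.5, 1.0, 0.0) the nonzero components are in a 1:2 ratio, so the
# integer Miller index is [1, 2, 0]:
# >>> GrainBoundaryGenerator.vec_to_surface([0.5, 1.0, 0.0])
# [1, 2, 0]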
def factors(n):
"""
Compute the factors of an integer.
Args:
n: the input integer
Returns:
a set of integers that are the factors of the input integer.
"""
return set(reduce(list.__add__,
([i, n // i] for i in range(1, int(np.sqrt(n)) + 1) if n % i == 0)))
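# Example (illustrative): each divisor i up to sqrt(n) contributes the
# pair (i, n // i), so every factor is covered exactly once:
# >>> sorted(factors(12))
# [1, 2, 3, 4, 6, 12]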
def fix_pbc(structure, matrix=None):
"""
Set all frac_coords of the input structure within [0,1].
Args:
structure (pymatgen structure object):
input structure
matrix (lattice matrix, 3 by 3 array/matrix):
new structure's lattice matrix; if None, use the
input structure's matrix
Return:
new structure with fixed frac_coords and lattice matrix
"""
spec = []
coords = []
if matrix is None:
latte = Lattice(structure.lattice.matrix)
else:
latte = Lattice(matrix)
for site in structure:
spec.append(site.specie)
coord = np.array(site.frac_coords)
for i in range(3):
coord[i] -= floor(coord[i])
if np.allclose(coord[i], 1):
coord[i] = 0
elif np.allclose(coord[i], 0):
coord[i] = 0
else:
coord[i] = round(coord[i], 7)
coords.append(coord)
return Structure(latte, spec, coords, site_properties=structure.site_properties)
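# Usage sketch (assumes a pymatgen Structure named struct is in scope):
# >>> wrapped = fix_pbc(struct)
# >>> all(0 <= x < 1 for site in wrapped for x in site.frac_coords)
# True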
def symm_group_cubic(mat):
"""
Obtain cubic symmetric equivalents of the list of vectors.
Args:
mat (n by 3 array/matrix): the list of vectors
Return:
cubic symmetric equivalents of the list of vectors.
"""
sym_group = np.zeros([24, 3, 3])
sym_group[0, :] = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
sym_group[1, :] = [[1, 0, 0], [0, -1, 0], [0, 0, -1]]
sym_group[2, :] = [[-1, 0, 0], [0, 1, 0], [0, 0, -1]]
sym_group[3, :] = [[-1, 0, 0], [0, -1, 0], [0, 0, 1]]
sym_group[4, :] = [[0, -1, 0], [-1, 0, 0], [0, 0, -1]]
sym_group[5, :] = [[0, -1, 0], [1, 0, 0], [0, 0, 1]]
sym_group[6, :] = [[0, 1, 0], [-1, 0, 0], [0, 0, 1]]
sym_group[7, :] = [[0, 1, 0], [1, 0, 0], [0, 0, -1]]
sym_group[8, :] = [[-1, 0, 0], [0, 0, -1], [0, -1, 0]]
sym_group[9, :] = [[-1, 0, 0], [0, 0, 1], [0, 1, 0]]
sym_group[10, :] = [[1, 0, 0], [0, 0, -1], [0, 1, 0]]
sym_group[11, :] = [[1, 0, 0], [0, 0, 1], [0, -1, 0]]
sym_group[12, :] = [[0, 1, 0], [0, 0, 1], [1, 0, 0]]
sym_group[13, :] = [[0, 1, 0], [0, 0, -1], [-1, 0, 0]]
sym_group[14, :] = [[0, -1, 0], [0, 0, 1], [-1, 0, 0]]
sym_group[15, :] = [[0, -1, 0], [0, 0, -1], [1, 0, 0]]
sym_group[16, :] = [[0, 0, 1], [1, 0, 0], [0, 1, 0]]
sym_group[17, :] = [[0, 0, 1], [-1, 0, 0], [0, -1, 0]]
sym_group[18, :] = [[0, 0, -1], [1, 0, 0], [0, -1, 0]]
sym_group[19, :] = [[0, 0, -1], [-1, 0, 0], [0, 1, 0]]
sym_group[20, :] = [[0, 0, -1], [0, -1, 0], [-1, 0, 0]]
sym_group[21, :] = [[0, 0, -1], [0, 1, 0], [1, 0, 0]]
sym_group[22, :] = [[0, 0, 1], [0, -1, 0], [1, 0, 0]]
sym_group[23, :] = [[0, 0, 1], [0, 1, 0], [-1, 0, 0]]
mat = np.atleast_2d(mat)
all_vectors = []
for sym in sym_group:
for vec in mat:
all_vectors.append(np.dot(sym, vec))
return np.unique(np.array(all_vectors), axis=0)
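# Example (illustrative): the 24 proper cubic rotations send a single
# <100> vector onto the six face directions, so six unique vectors remain
# after deduplication:
# >>> len(symm_group_cubic([[1, 0, 0]]))
# 6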
| mit |
slightstone/SickRage | sickbeard/providers/rsstorrent.py | 1 | 5772 | # Author: Mr_Orange
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import sickbeard
import generic
from sickbeard import helpers
from sickbeard import encodingKludge as ek
from sickbeard import logger
from sickbeard import tvcache
from sickbeard.exceptions import ex
from lib import requests
from lib.bencode import bdecode
class TorrentRssProvider(generic.TorrentProvider):
def __init__(self, name, url, cookies='', titleTAG='title', search_mode='eponly', search_fallback=False, enable_daily=False,
enable_backlog=False):
generic.TorrentProvider.__init__(self, name)
self.cache = TorrentRssCache(self)
self.urls = {'base_url': re.sub('\/$', '', url)}
self.url = self.urls['base_url']
self.enabled = True
self.ratio = None
self.supportsBacklog = False
self.search_mode = search_mode
self.search_fallback = search_fallback
self.enable_daily = enable_daily
self.enable_backlog = enable_backlog
self.cookies = cookies
self.titleTAG = titleTAG
def configStr(self):
return "%s|%s|%s|%s|%d|%s|%d|%d|%d" % (self.name or '',
self.url or '',
self.cookies or '',
self.titleTAG or '',
self.enabled,
self.search_mode or '',
self.search_fallback,
self.enable_daily,
self.enable_backlog)
def imageName(self):
if ek.ek(os.path.isfile,
ek.ek(os.path.join, sickbeard.PROG_DIR, 'gui', sickbeard.GUI_NAME, 'images', 'providers',
self.getID() + '.png')):
return self.getID() + '.png'
return 'torrentrss.png'
def isEnabled(self):
return self.enabled
def _get_title_and_url(self, item):
title = item.get(self.titleTAG)
if title:
title = u'' + title
title = title.replace(' ', '.')
attempt_list = [lambda: item.get('torrent_magneturi'),
lambda: item.enclosures[0].href,
lambda: item.get('link')]
url = None
for cur_attempt in attempt_list:
try:
url = cur_attempt()
except:
continue
if title and url:
break
return title, url
def validateRSS(self):
try:
if self.cookies:
cookie_validator = re.compile("^(\w+=\w+)(;\w+=\w+)*$")
if not cookie_validator.match(self.cookies):
return (False, 'Cookie is not correctly formatted: ' + self.cookies)
data = self.cache._getRSSData()['entries']
if not data:
return (False, 'No items found in the RSS feed ' + self.url)
(title, url) = self._get_title_and_url(data[0])
if not title:
return (False, 'Unable to get title from first item')
if not url:
return (False, 'Unable to get torrent url from first item')
if url.startswith('magnet:') and re.search('urn:btih:([\w]{32,40})', url):
return (True, 'RSS feed Parsed correctly')
else:
if self.cookies:
requests.utils.add_dict_to_cookiejar(self.session.cookies,
dict(x.rsplit('=', 1) for x in (self.cookies.split(';'))))
torrent_file = self.getURL(url)
try:
bdecode(torrent_file)
except Exception, e:
self.dumpHTML(torrent_file)
return (False, 'Torrent link is not a valid torrent file: ' + ex(e))
return (True, 'RSS feed Parsed correctly')
except Exception, e:
return (False, 'Error when trying to load RSS: ' + ex(e))
def dumpHTML(self, data):
dumpName = ek.ek(os.path.join, sickbeard.CACHE_DIR, 'custom_torrent.html')
try:
fileOut = open(dumpName, 'wb')
fileOut.write(data)
fileOut.close()
helpers.chmodAsParent(dumpName)
except IOError, e:
logger.log("Unable to save the file: " + ex(e), logger.ERROR)
return False
logger.log(u"Saved custom_torrent html dump " + dumpName + " ", logger.INFO)
return True
def seedRatio(self):
return self.ratio
class TorrentRssCache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
self.minTime = 15
def _getRSSData(self):
logger.log(u"TorrentRssCache cache update URL: " + self.provider.url, logger.DEBUG)
if self.provider.cookies:
self.provider.headers.update({'Cookie': self.provider.cookies})
return self.getRSSFeed(self.provider.url)
| gpl-3.0 |
rupran/ansible | test/integration/targets/module_utils/library/test.py | 91 | 2819 | #!/usr/bin/python
results = {}
# Test import with no from
import ansible.module_utils.foo0
results['foo0'] = ansible.module_utils.foo0.data
# Test depthful import with no from
import ansible.module_utils.bar0.foo
results['bar0'] = ansible.module_utils.bar0.foo.data
# Test import of module_utils/foo1.py
from ansible.module_utils import foo1
results['foo1'] = foo1.data
# Test import of an identifier inside of module_utils/foo2.py
from ansible.module_utils.foo2 import data
results['foo2'] = data
# Test import of module_utils/bar1/__init__.py
from ansible.module_utils import bar1
results['bar1'] = bar1.data
# Test import of an identifier inside of module_utils/bar2/__init__.py
from ansible.module_utils.bar2 import data
results['bar2'] = data
# Test import of module_utils/baz1/one.py
from ansible.module_utils.baz1 import one
results['baz1'] = one.data
# Test import of an identifier inside of module_utils/baz2/one.py
from ansible.module_utils.baz2.one import data
results['baz2'] = data
# Test import of module_utils/spam1/ham/eggs/__init__.py
from ansible.module_utils.spam1.ham import eggs
results['spam1'] = eggs.data
# Test import of an identifier inside module_utils/spam2/ham/eggs/__init__.py
from ansible.module_utils.spam2.ham.eggs import data
results['spam2'] = data
# Test import of module_utils/spam3/ham/bacon.py
from ansible.module_utils.spam3.ham import bacon
results['spam3'] = bacon.data
# Test import of an identifier inside of module_utils/spam4/ham/bacon.py
from ansible.module_utils.spam4.ham.bacon import data
results['spam4'] = data
# Test import of module_utils.spam5.ham bacon and eggs (modules)
from ansible.module_utils.spam5.ham import bacon, eggs
results['spam5'] = (bacon.data, eggs.data)
# Test import of module_utils.spam6.ham bacon and eggs (identifiers)
from ansible.module_utils.spam6.ham import bacon, eggs
results['spam6'] = (bacon, eggs)
# Test import of module_utils.spam7.ham bacon and eggs (module and identifier)
from ansible.module_utils.spam7.ham import bacon, eggs
results['spam7'] = (bacon.data, eggs)
# Test import of module_utils/spam8/ham/bacon.py and module_utils/spam8/ham/eggs.py separately
from ansible.module_utils.spam8.ham import bacon
from ansible.module_utils.spam8.ham import eggs
results['spam8'] = (bacon.data, eggs)
# Test that import of module_utils/qux1/quux.py using as works
from ansible.module_utils.qux1 import quux as one
results['qux1'] = one.data
# Test that importing qux2/quux.py and qux2/quuz.py using as works
from ansible.module_utils.qux2 import quux as one, quuz as two
results['qux2'] = (one.data, two.data)
# Test depth
from ansible.module_utils.a.b.c.d.e.f.g.h import data
results['abcdefgh'] = data
from ansible.module_utils.basic import AnsibleModule
AnsibleModule(argument_spec=dict()).exit_json(**results)
| gpl-3.0 |
pkilambi/python-gnocchiclient | gnocchiclient/openstack/common/setup.py | 9 | 13471 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities with minimum-depends for use in setup.py
"""
import datetime
import os
import re
import subprocess
import sys
from setuptools.command import sdist
def parse_mailmap(mailmap='.mailmap'):
mapping = {}
if os.path.exists(mailmap):
with open(mailmap, 'r') as fp:
for l in fp:
l = l.strip()
if not l.startswith('#') and ' ' in l:
canonical_email, alias = [x for x in l.split(' ')
if x.startswith('<')]
mapping[alias] = canonical_email
return mapping
def canonicalize_emails(changelog, mapping):
"""Takes in a string and an email alias mapping and replaces all
instances of the aliases in the string with their real email.
"""
for alias, email in mapping.iteritems():
changelog = changelog.replace(alias, email)
return changelog
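# Example (illustrative, hypothetical addresses):
# >>> canonicalize_emails('Fix by <jdoe@old.example>',
# ... {'<jdoe@old.example>': '<jdoe@example.org>'})
# 'Fix by <jdoe@example.org>'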
# Get requirements from the first file that exists
def get_reqs_from_files(requirements_files):
for requirements_file in requirements_files:
if os.path.exists(requirements_file):
with open(requirements_file, 'r') as fil:
return fil.read().split('\n')
return []
def parse_requirements(requirements_files=['requirements.txt',
'tools/pip-requires']):
requirements = []
for line in get_reqs_from_files(requirements_files):
# For the requirements list, we need to inject only the portion
# after egg= so that distutils knows the package it's looking for
# such as:
# -e git://github.com/openstack/nova/master#egg=nova
if re.match(r'\s*-e\s+', line):
requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1',
line))
# such as:
# http://github.com/openstack/nova/zipball/master#egg=nova
elif re.match(r'\s*https?:', line):
requirements.append(re.sub(r'\s*https?:.*#egg=(.*)$', r'\1',
line))
# -f lines are for index locations, and don't get used here
elif re.match(r'\s*-f\s+', line):
pass
# argparse is part of the standard library starting with 2.7
# adding it to the requirements list screws distro installs
elif line == 'argparse' and sys.version_info >= (2, 7):
pass
else:
requirements.append(line)
return requirements
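# Regex sketch for the '-e ...#egg=' branch above (hypothetical input
# line): only the portion after egg= survives as the requirement name:
# >>> re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1',
# ... '-e git://github.com/openstack/nova/master#egg=nova')
# 'nova'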
def parse_dependency_links(requirements_files=['requirements.txt',
'tools/pip-requires']):
dependency_links = []
# dependency_links inject alternate locations to find packages listed
# in requirements
for line in get_reqs_from_files(requirements_files):
# skip comments and blank lines
if re.match(r'(\s*#)|(\s*$)', line):
continue
# lines with -e or -f need the whole line, minus the flag
if re.match(r'\s*-[ef]\s+', line):
dependency_links.append(re.sub(r'\s*-[ef]\s+', '', line))
# lines that are only urls can go in unmolested
elif re.match(r'\s*https?:', line):
dependency_links.append(line)
return dependency_links
def write_requirements():
venv = os.environ.get('VIRTUAL_ENV', None)
if venv is not None:
with open("requirements.txt", "w") as req_file:
output = subprocess.Popen(["pip", "-E", venv, "freeze", "-l"],
stdout=subprocess.PIPE)
requirements = output.communicate()[0].strip()
req_file.write(requirements)
def _run_shell_command(cmd):
if os.name == 'nt':
output = subprocess.Popen(["cmd.exe", "/C", cmd],
stdout=subprocess.PIPE)
else:
output = subprocess.Popen(["/bin/sh", "-c", cmd],
stdout=subprocess.PIPE)
out = output.communicate()
if len(out) == 0:
return None
if len(out[0].strip()) == 0:
return None
return out[0].strip()
def _get_git_next_version_suffix(branch_name):
datestamp = datetime.datetime.now().strftime('%Y%m%d')
if branch_name == 'milestone-proposed':
revno_prefix = "r"
else:
revno_prefix = ""
_run_shell_command("git fetch origin +refs/meta/*:refs/remotes/meta/*")
milestone_cmd = "git show meta/openstack/release:%s" % branch_name
milestonever = _run_shell_command(milestone_cmd)
if milestonever:
first_half = "%s~%s" % (milestonever, datestamp)
else:
first_half = datestamp
post_version = _get_git_post_version()
# post version should look like:
# 0.1.1.4.gcc9e28a
# where the bit after the last . is the short sha, and the bit between
# the last and second to last is the revno count
(revno, sha) = post_version.split(".")[-2:]
second_half = "%s%s.%s" % (revno_prefix, revno, sha)
return ".".join((first_half, second_half))
def _get_git_current_tag():
return _run_shell_command("git tag --contains HEAD")
def _get_git_tag_info():
return _run_shell_command("git describe --tags")
def _get_git_post_version():
current_tag = _get_git_current_tag()
if current_tag is not None:
return current_tag
else:
tag_info = _get_git_tag_info()
if tag_info is None:
base_version = "0.0"
cmd = "git --no-pager log --oneline"
out = _run_shell_command(cmd)
revno = len(out.split("\n"))
sha = _run_shell_command("git describe --always")
else:
tag_infos = tag_info.split("-")
base_version = "-".join(tag_infos[:-2])
(revno, sha) = tag_infos[-2:]
return "%s.%s.%s" % (base_version, revno, sha)
def write_git_changelog():
"""Write a changelog based on the git changelog."""
new_changelog = 'ChangeLog'
if not os.getenv('SKIP_WRITE_GIT_CHANGELOG'):
if os.path.isdir('.git'):
git_log_cmd = 'git log --stat'
changelog = _run_shell_command(git_log_cmd)
mailmap = parse_mailmap()
with open(new_changelog, "w") as changelog_file:
changelog_file.write(canonicalize_emails(changelog, mailmap))
else:
open(new_changelog, 'w').close()
def generate_authors():
"""Create AUTHORS file using git commits."""
jenkins_email = 'jenkins@review.(openstack|stackforge).org'
old_authors = 'AUTHORS.in'
new_authors = 'AUTHORS'
if not os.getenv('SKIP_GENERATE_AUTHORS'):
if os.path.isdir('.git'):
# don't include jenkins email address in AUTHORS file
git_log_cmd = ("git log --format='%aN <%aE>' | sort -u | "
"egrep -v '" + jenkins_email + "'")
changelog = _run_shell_command(git_log_cmd)
mailmap = parse_mailmap()
with open(new_authors, 'w') as new_authors_fh:
new_authors_fh.write(canonicalize_emails(changelog, mailmap))
if os.path.exists(old_authors):
with open(old_authors, "r") as old_authors_fh:
new_authors_fh.write('\n' + old_authors_fh.read())
else:
open(new_authors, 'w').close()
_rst_template = """%(heading)s
%(underline)s
.. automodule:: %(module)s
:members:
:undoc-members:
:show-inheritance:
"""
def read_versioninfo(project):
"""Read the versioninfo file. If it doesn't exist, we're in a github
zipball, and there's really no way to know what version we really
are, but that should be ok, because the utility of that should be
just about nil if this code path is in use in the first place."""
versioninfo_path = os.path.join(project, 'versioninfo')
if os.path.exists(versioninfo_path):
with open(versioninfo_path, 'r') as vinfo:
version = vinfo.read().strip()
else:
version = "0.0.0"
return version
def write_versioninfo(project, version):
"""Write a simple file containing the version of the package."""
with open(os.path.join(project, 'versioninfo'), 'w') as fil:
fil.write("%s\n" % version)
def get_cmdclass():
"""Return dict of commands to run from setup.py."""
cmdclass = dict()
def _find_modules(arg, dirname, files):
for filename in files:
if filename.endswith('.py') and filename != '__init__.py':
arg["%s.%s" % (dirname.replace('/', '.'),
filename[:-3])] = True
class LocalSDist(sdist.sdist):
"""Builds the ChangeLog and Authors files from VC first."""
def run(self):
write_git_changelog()
generate_authors()
# sdist.sdist is an old style class, can't use super()
sdist.sdist.run(self)
cmdclass['sdist'] = LocalSDist
# If Sphinx is installed on the box running setup.py,
# enable setup.py to build the documentation, otherwise,
# just ignore it
try:
from sphinx.setup_command import BuildDoc
class LocalBuildDoc(BuildDoc):
def generate_autoindex(self):
print "**Autodocumenting from %s" % os.path.abspath(os.curdir)
modules = {}
option_dict = self.distribution.get_option_dict('build_sphinx')
source_dir = os.path.join(option_dict['source_dir'][1], 'api')
if not os.path.exists(source_dir):
os.makedirs(source_dir)
for pkg in self.distribution.packages:
if '.' not in pkg:
os.path.walk(pkg, _find_modules, modules)
module_list = modules.keys()
module_list.sort()
autoindex_filename = os.path.join(source_dir, 'autoindex.rst')
with open(autoindex_filename, 'w') as autoindex:
autoindex.write(""".. toctree::
:maxdepth: 1
""")
for module in module_list:
output_filename = os.path.join(source_dir,
"%s.rst" % module)
heading = "The :mod:`%s` Module" % module
underline = "=" * len(heading)
values = dict(module=module, heading=heading,
underline=underline)
print "Generating %s" % output_filename
with open(output_filename, 'w') as output_file:
output_file.write(_rst_template % values)
autoindex.write(" %s.rst\n" % module)
def run(self):
if not os.getenv('SPHINX_DEBUG'):
self.generate_autoindex()
for builder in ['html', 'man']:
self.builder = builder
self.finalize_options()
self.project = self.distribution.get_name()
self.version = self.distribution.get_version()
self.release = self.distribution.get_version()
BuildDoc.run(self)
cmdclass['build_sphinx'] = LocalBuildDoc
except ImportError:
pass
return cmdclass
def get_git_branchname():
for branch in _run_shell_command("git branch --color=never").split("\n"):
if branch.startswith('*'):
_branch_name = branch.split()[1].strip()
if _branch_name == "(no":
_branch_name = "no-branch"
return _branch_name
def get_pre_version(projectname, base_version):
"""Return a version which is leading up to a version that will
be released in the future."""
if os.path.isdir('.git'):
current_tag = _get_git_current_tag()
if current_tag is not None:
version = current_tag
else:
branch_name = os.getenv('BRANCHNAME',
os.getenv('GERRIT_REFNAME',
get_git_branchname()))
version_suffix = _get_git_next_version_suffix(branch_name)
version = "%s~%s" % (base_version, version_suffix)
write_versioninfo(projectname, version)
return version
else:
version = read_versioninfo(projectname)
return version
def get_post_version(projectname):
"""Return a version which is equal to the tag that's on the current
revision if there is one, or tag plus number of additional revisions
if the current revision has no tag."""
if os.path.isdir('.git'):
version = _get_git_post_version()
write_versioninfo(projectname, version)
return version
return read_versioninfo(projectname)
| apache-2.0 |
rsunder10/PopularityBased-SearchEngine | lib/python3.4/site-packages/django/contrib/staticfiles/management/commands/findstatic.py | 463 | 1745 | from __future__ import unicode_literals
import os
from django.contrib.staticfiles import finders
from django.core.management.base import LabelCommand
from django.utils.encoding import force_text
class Command(LabelCommand):
help = "Finds the absolute paths for the given static file(s)."
label = 'static file'
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument('--first', action='store_false', dest='all',
default=True,
help="Only return the first match for each static file.")
def handle_label(self, path, **options):
verbosity = options['verbosity']
result = finders.find(path, all=options['all'])
path = force_text(path)
if verbosity >= 2:
searched_locations = ("Looking in the following locations:\n %s" %
"\n ".join(force_text(location)
for location in finders.searched_locations))
else:
searched_locations = ''
if result:
if not isinstance(result, (list, tuple)):
result = [result]
result = (force_text(os.path.realpath(path)) for path in result)
if verbosity >= 1:
file_list = '\n '.join(result)
return ("Found '%s' here:\n %s\n%s" %
(path, file_list, searched_locations))
else:
return '\n'.join(result)
else:
message = ["No matching file found for '%s'." % path]
if verbosity >= 2:
message.append(searched_locations)
if verbosity >= 1:
self.stderr.write('\n'.join(message))
| mit |
cwisecarver/osf.io | addons/s3/views.py | 3 | 4517 | import httplib
from boto import exception
from django.core.exceptions import ValidationError
from flask import request
from framework.exceptions import HTTPError
from framework.auth.decorators import must_be_logged_in
from addons.base import generic_views
from addons.s3 import utils
from addons.s3.serializer import S3Serializer
from osf.models import ExternalAccount
from website.project.decorators import (
must_have_addon, must_have_permission,
must_be_addon_authorizer,
)
SHORT_NAME = 's3'
FULL_NAME = 'Amazon S3'
s3_account_list = generic_views.account_list(
SHORT_NAME,
S3Serializer
)
s3_import_auth = generic_views.import_auth(
SHORT_NAME,
S3Serializer
)
s3_deauthorize_node = generic_views.deauthorize_node(
SHORT_NAME
)
s3_get_config = generic_views.get_config(
SHORT_NAME,
S3Serializer
)
def _set_folder(node_addon, folder, auth):
folder_id = folder['id']
node_addon.set_folder(folder_id, auth=auth)
node_addon.save()
s3_set_config = generic_views.set_config(
SHORT_NAME,
FULL_NAME,
S3Serializer,
_set_folder
)
@must_have_addon(SHORT_NAME, 'node')
@must_be_addon_authorizer(SHORT_NAME)
def s3_folder_list(node_addon, **kwargs):
""" Returns all the subsequent folders under the folder id passed.
"""
return node_addon.get_folders()
@must_be_logged_in
def s3_add_user_account(auth, **kwargs):
"""Verifies new external account credentials and adds to user's list"""
try:
access_key = request.json['access_key']
secret_key = request.json['secret_key']
except KeyError:
raise HTTPError(httplib.BAD_REQUEST)
if not (access_key and secret_key):
return {
'message': 'All the fields above are required.'
}, httplib.BAD_REQUEST
user_info = utils.get_user_info(access_key, secret_key)
if not user_info:
return {
'message': ('Unable to access account.\n'
'Check to make sure that the above credentials are valid, '
'and that they have permission to list buckets.')
}, httplib.BAD_REQUEST
if not utils.can_list(access_key, secret_key):
return {
'message': ('Unable to list buckets.\n'
'Listing buckets is a required permission that can be changed via IAM')
}, httplib.BAD_REQUEST
account = None
try:
account = ExternalAccount(
provider=SHORT_NAME,
provider_name=FULL_NAME,
oauth_key=access_key,
oauth_secret=secret_key,
provider_id=user_info.id,
display_name=user_info.display_name,
)
account.save()
except ValidationError:
# ... or get the old one
account = ExternalAccount.objects.get(
provider=SHORT_NAME,
provider_id=user_info.id
)
assert account is not None
if not auth.user.external_accounts.filter(id=account.id).exists():
auth.user.external_accounts.add(account)
# Ensure S3 is enabled.
auth.user.get_or_add_addon('s3', auth=auth)
auth.user.save()
return {}
@must_be_addon_authorizer(SHORT_NAME)
@must_have_addon('s3', 'node')
@must_have_permission('write')
def create_bucket(auth, node_addon, **kwargs):
bucket_name = request.json.get('bucket_name', '')
bucket_location = request.json.get('bucket_location', '')
if not utils.validate_bucket_name(bucket_name):
return {
'message': 'That bucket name is not valid.',
'title': 'Invalid bucket name',
}, httplib.BAD_REQUEST
# Get location and verify it is valid
if not utils.validate_bucket_location(bucket_location):
return {
'message': 'That bucket location is not valid.',
'title': 'Invalid bucket location',
}, httplib.BAD_REQUEST
try:
utils.create_bucket(node_addon, bucket_name, bucket_location)
except exception.S3ResponseError as e:
return {
'message': e.message,
'title': 'Problem connecting to S3',
}, httplib.BAD_REQUEST
except exception.S3CreateError as e:
return {
'message': e.message,
'title': "Problem creating bucket '{0}'".format(bucket_name),
}, httplib.BAD_REQUEST
except exception.BotoClientError as e: # Base class catchall
return {
'message': e.message,
'title': 'Error connecting to S3',
}, httplib.BAD_REQUEST
return {}
| apache-2.0 |
koparasy/faultinjection-gem5 | src/dev/alpha/AlphaBackdoor.py | 2 | 1948 | # Copyright (c) 2005-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from m5.params import *
from m5.proxy import *
from Device import BasicPioDevice
class AlphaBackdoor(BasicPioDevice):
type = 'AlphaBackdoor'
cpu = Param.BaseCPU(Parent.cpu[0], "Processor")
disk = Param.SimpleDisk("Simple Disk")
terminal = Param.Terminal(Parent.any, "The console terminal")
system = Param.AlphaSystem(Parent.any, "system object")
| bsd-3-clause |
adw0rd/lettuce | tests/integration/lib/Django-1.2.5/django/conf/locale/en/formats.py | 66 | 1359 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
DATE_FORMAT = 'N j, Y'
TIME_FORMAT = 'P'
DATETIME_FORMAT = 'N j, Y, P'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'F j'
SHORT_DATE_FORMAT = 'm/d/Y'
SHORT_DATETIME_FORMAT = 'm/d/Y P'
FIRST_DAY_OF_WEEK = 0 # Sunday
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
# '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
# '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
# '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
DECIMAL_SEPARATOR = u'.'
THOUSAND_SEPARATOR = u','
NUMBER_GROUPING = 3
| gpl-3.0 |
kangkot/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_runpy.py | 58 | 9952 | # Test the runpy module
import unittest
import os
import os.path
import sys
import tempfile
from test.test_support import verbose, run_unittest, forget
from runpy import _run_code, _run_module_code, run_module
# Note: This module can't safely test _run_module_as_main as it
# runs its tests in the current process, which would mess with the
# real __main__ module (usually test.regrtest)
# See test_cmd_line_script for a test that executes that code path
# Set up the test code and expected results
class RunModuleCodeTest(unittest.TestCase):
expected_result = ["Top level assignment", "Lower level reference"]
test_source = (
"# Check basic code execution\n"
"result = ['Top level assignment']\n"
"def f():\n"
" result.append('Lower level reference')\n"
"f()\n"
"# Check the sys module\n"
"import sys\n"
"run_argv0 = sys.argv[0]\n"
"run_name_in_sys_modules = __name__ in sys.modules\n"
"if run_name_in_sys_modules:\n"
" module_in_sys_modules = globals() is sys.modules[__name__].__dict__\n"
"# Check nested operation\n"
"import runpy\n"
"nested = runpy._run_module_code('x=1\\n', mod_name='<run>')\n"
)
def test_run_code(self):
saved_argv0 = sys.argv[0]
d = _run_code(self.test_source, {})
self.failUnless(d["result"] == self.expected_result)
self.failUnless(d["__name__"] is None)
self.failUnless(d["__file__"] is None)
self.failUnless(d["__loader__"] is None)
self.failUnless(d["__package__"] is None)
self.failUnless(d["run_argv0"] is saved_argv0)
self.failUnless("run_name" not in d)
self.failUnless(sys.argv[0] is saved_argv0)
def test_run_module_code(self):
initial = object()
name = "<Nonsense>"
file = "Some other nonsense"
loader = "Now you're just being silly"
package = '' # Treat as a top level module
d1 = dict(initial=initial)
saved_argv0 = sys.argv[0]
d2 = _run_module_code(self.test_source,
d1,
name,
file,
loader,
package)
self.failUnless("result" not in d1)
self.failUnless(d2["initial"] is initial)
self.failUnless(d2["result"] == self.expected_result)
self.failUnless(d2["nested"]["x"] == 1)
self.failUnless(d2["__name__"] is name)
self.failUnless(d2["run_name_in_sys_modules"])
self.failUnless(d2["module_in_sys_modules"])
self.failUnless(d2["__file__"] is file)
self.failUnless(d2["run_argv0"] is file)
self.failUnless(d2["__loader__"] is loader)
self.failUnless(d2["__package__"] is package)
self.failUnless(sys.argv[0] is saved_argv0)
self.failUnless(name not in sys.modules)
class RunModuleTest(unittest.TestCase):
def expect_import_error(self, mod_name):
try:
run_module(mod_name)
except ImportError:
pass
else:
self.fail("Expected import error for " + mod_name)
def test_invalid_names(self):
# Builtin module
self.expect_import_error("sys")
# Non-existent modules
self.expect_import_error("sys.imp.eric")
self.expect_import_error("os.path.half")
self.expect_import_error("a.bee")
self.expect_import_error(".howard")
self.expect_import_error("..eaten")
# Package
self.expect_import_error("logging")
def test_library_module(self):
run_module("runpy")
def _add_pkg_dir(self, pkg_dir):
os.mkdir(pkg_dir)
pkg_fname = os.path.join(pkg_dir, "__init__"+os.extsep+"py")
pkg_file = open(pkg_fname, "w")
pkg_file.close()
return pkg_fname
def _make_pkg(self, source, depth):
pkg_name = "__runpy_pkg__"
test_fname = "runpy_test"+os.extsep+"py"
pkg_dir = sub_dir = tempfile.mkdtemp()
if verbose: print " Package tree in:", sub_dir
sys.path.insert(0, pkg_dir)
if verbose: print " Updated sys.path:", sys.path[0]
for i in range(depth):
sub_dir = os.path.join(sub_dir, pkg_name)
pkg_fname = self._add_pkg_dir(sub_dir)
if verbose: print " Next level in:", sub_dir
if verbose: print " Created:", pkg_fname
mod_fname = os.path.join(sub_dir, test_fname)
mod_file = open(mod_fname, "w")
mod_file.write(source)
mod_file.close()
if verbose: print " Created:", mod_fname
mod_name = (pkg_name+".")*depth + "runpy_test"
return pkg_dir, mod_fname, mod_name
def _del_pkg(self, top, depth, mod_name):
for entry in list(sys.modules):
if entry.startswith("__runpy_pkg__"):
del sys.modules[entry]
if verbose: print " Removed sys.modules entries"
del sys.path[0]
if verbose: print " Removed sys.path entry"
for root, dirs, files in os.walk(top, topdown=False):
for name in files:
try:
os.remove(os.path.join(root, name))
except OSError, ex:
if verbose: print ex # Persist with cleaning up
for name in dirs:
fullname = os.path.join(root, name)
try:
os.rmdir(fullname)
except OSError, ex:
if verbose: print ex # Persist with cleaning up
try:
os.rmdir(top)
if verbose: print " Removed package tree"
except OSError, ex:
if verbose: print ex # Persist with cleaning up
def _check_module(self, depth):
pkg_dir, mod_fname, mod_name = (
self._make_pkg("x=1\n", depth))
forget(mod_name)
try:
if verbose: print "Running from source:", mod_name
d1 = run_module(mod_name) # Read from source
self.failUnless("x" in d1)
self.failUnless(d1["x"] == 1)
del d1 # Ensure __loader__ entry doesn't keep file open
__import__(mod_name)
os.remove(mod_fname)
if verbose: print "Running from compiled:", mod_name
d2 = run_module(mod_name) # Read from bytecode
self.failUnless("x" in d2)
self.failUnless(d2["x"] == 1)
del d2 # Ensure __loader__ entry doesn't keep file open
finally:
self._del_pkg(pkg_dir, depth, mod_name)
if verbose: print "Module executed successfully"
def _add_relative_modules(self, base_dir, source, depth):
if depth <= 1:
raise ValueError("Relative module test needs depth > 1")
pkg_name = "__runpy_pkg__"
module_dir = base_dir
for i in range(depth):
parent_dir = module_dir
module_dir = os.path.join(module_dir, pkg_name)
# Add sibling module
sibling_fname = os.path.join(module_dir, "sibling"+os.extsep+"py")
sibling_file = open(sibling_fname, "w")
sibling_file.close()
if verbose: print " Added sibling module:", sibling_fname
# Add nephew module
uncle_dir = os.path.join(parent_dir, "uncle")
self._add_pkg_dir(uncle_dir)
if verbose: print " Added uncle package:", uncle_dir
cousin_dir = os.path.join(uncle_dir, "cousin")
self._add_pkg_dir(cousin_dir)
if verbose: print " Added cousin package:", cousin_dir
nephew_fname = os.path.join(cousin_dir, "nephew"+os.extsep+"py")
nephew_file = open(nephew_fname, "w")
nephew_file.close()
if verbose: print " Added nephew module:", nephew_fname
def _check_relative_imports(self, depth, run_name=None):
contents = r"""\
from __future__ import absolute_import
from . import sibling
from ..uncle.cousin import nephew
"""
pkg_dir, mod_fname, mod_name = (
self._make_pkg(contents, depth))
try:
self._add_relative_modules(pkg_dir, contents, depth)
pkg_name = mod_name.rpartition('.')[0]
if verbose: print "Running from source:", mod_name
d1 = run_module(mod_name, run_name=run_name) # Read from source
self.failUnless("__package__" in d1)
self.failUnless(d1["__package__"] == pkg_name)
self.failUnless("sibling" in d1)
self.failUnless("nephew" in d1)
del d1 # Ensure __loader__ entry doesn't keep file open
__import__(mod_name)
os.remove(mod_fname)
if verbose: print "Running from compiled:", mod_name
d2 = run_module(mod_name, run_name=run_name) # Read from bytecode
self.failUnless("__package__" in d2)
self.failUnless(d2["__package__"] == pkg_name)
self.failUnless("sibling" in d2)
self.failUnless("nephew" in d2)
del d2 # Ensure __loader__ entry doesn't keep file open
finally:
self._del_pkg(pkg_dir, depth, mod_name)
if verbose: print "Module executed successfully"
def test_run_module(self):
for depth in range(4):
if verbose: print "Testing package depth:", depth
self._check_module(depth)
def test_explicit_relative_import(self):
for depth in range(2, 5):
if verbose: print "Testing relative imports at depth:", depth
self._check_relative_imports(depth)
def test_main_relative_import(self):
for depth in range(2, 5):
if verbose: print "Testing main relative imports at depth:", depth
self._check_relative_imports(depth, "__main__")
def test_main():
run_unittest(RunModuleCodeTest)
run_unittest(RunModuleTest)
if __name__ == "__main__":
test_main()
| apache-2.0 |
wemanuel/smry | server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/compute/subcommands/backend_services/add_backend.py | 2 | 3466 | # Copyright 2014 Google Inc. All Rights Reserved.
"""Command for adding a backend to a backend service."""
import copy
from googlecloudapis.compute.v1 import compute_v1_messages
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.compute.lib import backend_services_utils
from googlecloudsdk.compute.lib import base_classes
class AddBackend(base_classes.ReadWriteCommand):
"""Add a backend to a backend service."""
@staticmethod
def Args(parser):
backend_services_utils.AddUpdatableBackendArgs(parser, compute_v1_messages)
parser.add_argument(
'name',
help='The name of the backend service.')
@property
def service(self):
return self.compute.backendServices
@property
def resource_type(self):
return 'backendServices'
def CreateReference(self, args):
return self.CreateGlobalReference(args.name)
def GetGetRequest(self, args):
return (self.service,
'Get',
self.messages.ComputeBackendServicesGetRequest(
backendService=self.ref.Name(),
project=self.project))
def GetSetRequest(self, args, replacement, existing):
return (self.service,
'Update',
self.messages.ComputeBackendServicesUpdateRequest(
backendService=self.ref.Name(),
backendServiceResource=replacement,
project=self.project))
def Modify(self, args, existing):
replacement = copy.deepcopy(existing)
group_ref = None
if args.group is not None:
group_ref = self.CreateZonalReference(
args.group, args.zone, resource_type='zoneViews')
else:
group_ref = self.CreateZonalReference(
args.instance_group, args.zone, resource_type='instanceGroups')
group_uri = group_ref.SelfLink()
for backend in existing.backends:
if group_uri == backend.group:
raise exceptions.ToolException(
'Backend [{0}] in zone [{1}] already exists in backend service '
'[{2}].'.format(args.group, args.zone, args.name))
if args.balancing_mode:
balancing_mode = self.messages.Backend.BalancingModeValueValuesEnum(
args.balancing_mode)
else:
balancing_mode = None
backend = self.messages.Backend(
balancingMode=balancing_mode,
capacityScaler=args.capacity_scaler,
description=args.description,
group=group_uri,
maxRate=args.max_rate,
maxRatePerInstance=args.max_rate_per_instance,
maxUtilization=args.max_utilization)
replacement.backends.append(backend)
return replacement
AddBackend.detailed_help = {
'brief': 'Add a backend to a backend service',
'DESCRIPTION': """
*{command}* is used to add a backend to a backend service. A
backend is a group of tasks that can handle requests sent to a
backend service. Currently, the group of tasks can be one or
more Google Compute Engine virtual machine instances grouped
together using an instance group.
Traffic is first spread evenly across all virtual machines in
the group. When the group is full, traffic is sent to the next
nearest group(s) that still have remaining capacity.
To modify the parameters of a backend after it has been added
to the backend service, use 'gcloud compute backend-services
update-backend' or 'gcloud compute backend-services edit'.
""",
}
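# Illustrative invocation (hypothetical names; flag spellings are assumed
# to follow the argument definitions referenced above):
# gcloud compute backend-services add-backend my-service \
#     --instance-group my-group --zone us-central1-a \
#     --balancing-mode RATE --max-rate 100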
| apache-2.0 |
ShassAro/ShassAro | Bl_project/blVirtualEnv/lib/python2.7/site-packages/django/db/backends/sqlite3/base.py | 52 | 23862 | """
SQLite3 backend for django.
Works with either the pysqlite2 module or the sqlite3 module in the
standard library.
"""
from __future__ import unicode_literals
import datetime
import decimal
import warnings
import re
from django.conf import settings
from django.db import utils
from django.db.backends import (utils as backend_utils, BaseDatabaseFeatures,
BaseDatabaseOperations, BaseDatabaseWrapper, BaseDatabaseValidation)
from django.db.backends.sqlite3.client import DatabaseClient
from django.db.backends.sqlite3.creation import DatabaseCreation
from django.db.backends.sqlite3.introspection import DatabaseIntrospection
from django.db.backends.sqlite3.schema import DatabaseSchemaEditor
from django.db.models import fields
from django.db.models.sql import aggregates
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes
from django.utils import six
from django.utils import timezone
try:
try:
from pysqlite2 import dbapi2 as Database
except ImportError:
from sqlite3 import dbapi2 as Database
except ImportError as exc:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading either pysqlite2 or sqlite3 modules (tried in that order): %s" % exc)
try:
import pytz
except ImportError:
pytz = None
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
def parse_datetime_with_timezone_support(value):
dt = parse_datetime(value)
# Confirm that dt is naive before overwriting its tzinfo.
if dt is not None and settings.USE_TZ and timezone.is_naive(dt):
dt = dt.replace(tzinfo=timezone.utc)
return dt
def adapt_datetime_with_timezone_support(value):
# Equivalent to DateTimeField.get_db_prep_value. Used only by raw SQL.
if settings.USE_TZ:
if timezone.is_naive(value):
warnings.warn("SQLite received a naive datetime (%s)"
" while time zone support is active." % value,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
value = value.astimezone(timezone.utc).replace(tzinfo=None)
return value.isoformat(str(" "))
def decoder(conv_func):
""" The Python sqlite3 interface returns always byte strings.
This function converts the received value to a regular string before
passing it to the receiver function.
"""
return lambda s: conv_func(s.decode('utf-8'))
Database.register_converter(str("bool"), decoder(lambda s: s == '1'))
Database.register_converter(str("time"), decoder(parse_time))
Database.register_converter(str("date"), decoder(parse_date))
Database.register_converter(str("datetime"), decoder(parse_datetime_with_timezone_support))
Database.register_converter(str("timestamp"), decoder(parse_datetime_with_timezone_support))
Database.register_converter(str("TIMESTAMP"), decoder(parse_datetime_with_timezone_support))
Database.register_converter(str("decimal"), decoder(backend_utils.typecast_decimal))
Database.register_adapter(datetime.datetime, adapt_datetime_with_timezone_support)
Database.register_adapter(decimal.Decimal, backend_utils.rev_typecast_decimal)
if six.PY2:
Database.register_adapter(str, lambda s: s.decode('utf-8'))
Database.register_adapter(SafeBytes, lambda s: s.decode('utf-8'))
class DatabaseFeatures(BaseDatabaseFeatures):
# SQLite cannot handle us only partially reading from a cursor's result set
# and then writing the same rows to the database in another cursor. This
# setting ensures we always read result sets fully into memory all in one
# go.
can_use_chunked_reads = False
test_db_allows_multiple_connections = False
supports_unspecified_pk = True
supports_timezones = False
supports_1000_query_parameters = False
supports_mixed_date_datetime_comparisons = False
has_bulk_insert = True
can_combine_inserts_with_and_without_auto_increment_pk = False
supports_foreign_keys = False
supports_column_check_constraints = False
autocommits_when_autocommit_is_off = True
can_introspect_decimal_field = False
can_introspect_positive_integer_field = True
can_introspect_small_integer_field = True
supports_transactions = True
atomic_transactions = False
can_rollback_ddl = True
supports_paramstyle_pyformat = False
supports_sequence_reset = False
@cached_property
def uses_savepoints(self):
return Database.sqlite_version_info >= (3, 6, 8)
@cached_property
def supports_stddev(self):
"""Confirm support for STDDEV and related stats functions
SQLite supports STDDEV as an extension package; so
connection.ops.check_aggregate_support() can't unilaterally
rule out support for STDDEV. We need to manually check
whether the call works.
"""
with self.connection.cursor() as cursor:
cursor.execute('CREATE TABLE STDDEV_TEST (X INT)')
try:
cursor.execute('SELECT STDDEV(*) FROM STDDEV_TEST')
has_support = True
except utils.DatabaseError:
has_support = False
cursor.execute('DROP TABLE STDDEV_TEST')
return has_support
@cached_property
def has_zoneinfo_database(self):
return pytz is not None
class DatabaseOperations(BaseDatabaseOperations):
def bulk_batch_size(self, fields, objs):
"""
SQLite has a compile-time default (SQLITE_LIMIT_VARIABLE_NUMBER) of
999 variables per query.
If there is just single field to insert, then we can hit another
limit, SQLITE_MAX_COMPOUND_SELECT which defaults to 500.
"""
limit = 999 if len(fields) > 1 else 500
return (limit // len(fields)) if len(fields) > 0 else len(objs)
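# Illustrative arithmetic (hypothetical field counts): with three fields
# the 999-variable limit applies, so 999 // 3 == 333 objects per batch;
# with a single field the 500-row compound-select limit caps it at 500.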
def check_aggregate_support(self, aggregate):
bad_fields = (fields.DateField, fields.DateTimeField, fields.TimeField)
bad_aggregates = (aggregates.Sum, aggregates.Avg,
aggregates.Variance, aggregates.StdDev)
if (isinstance(aggregate.source, bad_fields) and
isinstance(aggregate, bad_aggregates)):
raise NotImplementedError(
'You cannot use Sum, Avg, StdDev and Variance aggregations '
'on date/time fields in sqlite3 '
'since date/time is saved as text.')
def date_extract_sql(self, lookup_type, field_name):
# sqlite doesn't support extract, so we fake it with the user-defined
# function django_date_extract that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_date_extract('%s', %s)" % (lookup_type.lower(), field_name)
def date_interval_sql(self, sql, connector, timedelta):
# It would be more straightforward if we could use the sqlite strftime
# function, but it does not allow for keeping six digits of fractional
# second information, nor does it allow for formatting date and datetime
# values differently. So instead we register our own function that
# formats the datetime combined with the delta in a manner suitable
# for comparisons.
return 'django_format_dtdelta(%s, "%s", "%d", "%d", "%d")' % (sql,
connector, timedelta.days, timedelta.seconds, timedelta.microseconds)
def date_trunc_sql(self, lookup_type, field_name):
# sqlite doesn't support DATE_TRUNC, so we fake it with a user-defined
# function django_date_trunc that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name)
def datetime_extract_sql(self, lookup_type, field_name, tzname):
# Same comment as in date_extract_sql.
if settings.USE_TZ:
if pytz is None:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("This query requires pytz, "
"but it isn't installed.")
return "django_datetime_extract('%s', %s, %%s)" % (
lookup_type.lower(), field_name), [tzname]
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
# Same comment as in date_trunc_sql.
if settings.USE_TZ:
if pytz is None:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("This query requires pytz, "
"but it isn't installed.")
return "django_datetime_trunc('%s', %s, %%s)" % (
lookup_type.lower(), field_name), [tzname]
def drop_foreignkey_sql(self):
return ""
def pk_default_value(self):
return "NULL"
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def no_limit_value(self):
return -1
def sql_flush(self, style, tables, sequences, allow_cascade=False):
# NB: The generated SQL below is specific to SQLite
# Note: The DELETE FROM... SQL generated below works for SQLite databases
# because constraints don't exist
sql = ['%s %s %s;' % (
style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table))
) for table in tables]
# Note: No requirement for reset of auto-incremented indices (cf. other
# sql_flush() implementations). Just return SQL at this point
return sql
def value_to_db_datetime(self, value):
if value is None:
return None
# SQLite doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = value.astimezone(timezone.utc).replace(tzinfo=None)
else:
raise ValueError("SQLite backend does not support timezone-aware datetimes when USE_TZ is False.")
return six.text_type(value)
def value_to_db_time(self, value):
if value is None:
return None
# SQLite doesn't support tz-aware datetimes
if timezone.is_aware(value):
raise ValueError("SQLite backend does not support timezone-aware times.")
return six.text_type(value)
def convert_values(self, value, field):
"""SQLite returns floats when it should be returning decimals,
and gets dates and datetimes wrong.
For consistency with other backends, coerce when required.
"""
if value is None:
return None
internal_type = field.get_internal_type()
if internal_type == 'DecimalField':
return backend_utils.typecast_decimal(field.format_number(value))
elif internal_type and internal_type.endswith('IntegerField') or internal_type == 'AutoField':
return int(value)
elif internal_type == 'DateField':
return parse_date(value)
elif internal_type == 'DateTimeField':
return parse_datetime_with_timezone_support(value)
elif internal_type == 'TimeField':
return parse_time(value)
# No field, or the field isn't known to be a decimal or integer
return value
def bulk_insert_sql(self, fields, num_values):
res = []
res.append("SELECT %s" % ", ".join(
"%%s AS %s" % self.quote_name(f.column) for f in fields
))
res.extend(["UNION ALL SELECT %s" % ", ".join(["%s"] * len(fields))] * (num_values - 1))
return " ".join(res)
def combine_expression(self, connector, sub_expressions):
# SQLite doesn't have a power function, so we fake it with a
# user-defined function django_power that's registered in connect().
if connector == '^':
return 'django_power(%s)' % ','.join(sub_expressions)
return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
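# Informal example: combining ('a', 'b') with the '^' connector yields
# 'django_power(a,b)'; every other connector falls through to the default
# implementation (e.g. 'a + b').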
def integer_field_range(self, internal_type):
# SQLite doesn't enforce any integer constraints
return (None, None)
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'sqlite'
# SQLite requires LIKE statements to include an ESCAPE clause if the value
# being escaped has a percent or underscore in it.
# See http://www.sqlite.org/lang_expr.html for an explanation.
operators = {
'exact': '= %s',
'iexact': "LIKE %s ESCAPE '\\'",
'contains': "LIKE %s ESCAPE '\\'",
'icontains': "LIKE %s ESCAPE '\\'",
'regex': 'REGEXP %s',
'iregex': "REGEXP '(?i)' || %s",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE %s ESCAPE '\\'",
'endswith': "LIKE %s ESCAPE '\\'",
'istartswith': "LIKE %s ESCAPE '\\'",
'iendswith': "LIKE %s ESCAPE '\\'",
}
pattern_ops = {
'startswith': "LIKE %s || '%%%%'",
'istartswith': "LIKE UPPER(%s) || '%%%%'",
}
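# Informal example of the escaping mentioned above: a 'contains' lookup for
# the value '50%' is sent roughly as LIKE '%50\%%' ESCAPE '\' (the value is
# escaped elsewhere in the ORM), so the literal percent sign in the value
# doesn't act as a wildcard.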
Database = Database
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def get_connection_params(self):
settings_dict = self.settings_dict
if not settings_dict['NAME']:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
kwargs = {
'database': settings_dict['NAME'],
'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
}
kwargs.update(settings_dict['OPTIONS'])
# Always allow the underlying SQLite connection to be shareable
# between multiple threads. The safe-guarding will be handled at a
# higher level by the `BaseDatabaseWrapper.allow_thread_sharing`
# property. This is necessary as the shareability is disabled by
# default in pysqlite and it cannot be changed once a connection is
# opened.
if 'check_same_thread' in kwargs and kwargs['check_same_thread']:
warnings.warn(
'The `check_same_thread` option was provided and set to '
'True. It will be overridden with False. Use the '
'`DatabaseWrapper.allow_thread_sharing` property instead '
'for controlling thread shareability.',
RuntimeWarning
)
kwargs.update({'check_same_thread': False})
return kwargs
def get_new_connection(self, conn_params):
conn = Database.connect(**conn_params)
conn.create_function("django_date_extract", 2, _sqlite_date_extract)
conn.create_function("django_date_trunc", 2, _sqlite_date_trunc)
conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract)
conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc)
conn.create_function("regexp", 2, _sqlite_regexp)
conn.create_function("django_format_dtdelta", 5, _sqlite_format_dtdelta)
conn.create_function("django_power", 2, _sqlite_power)
return conn
def init_connection_state(self):
pass
def create_cursor(self):
return self.connection.cursor(factory=SQLiteCursorWrapper)
def close(self):
self.validate_thread_sharing()
# If database is in memory, closing the connection destroys the
# database. To prevent accidental data loss, ignore close requests on
# an in-memory db.
if self.settings_dict['NAME'] != ":memory:":
BaseDatabaseWrapper.close(self)
def _savepoint_allowed(self):
# Two conditions are required here:
# - A sufficiently recent version of SQLite to support savepoints,
# - Being in a transaction, which can only happen inside 'atomic'.
# When 'isolation_level' is not None, sqlite3 commits before each
# savepoint; it's a bug. When it is None, savepoints don't make sense
# because autocommit is enabled. The only exception is inside 'atomic'
# blocks. To work around that bug, on SQLite, 'atomic' starts a
# transaction explicitly rather than simply disable autocommit.
return self.features.uses_savepoints and self.in_atomic_block
def _set_autocommit(self, autocommit):
if autocommit:
level = None
else:
# sqlite3's internal default is ''. It's different from None.
# See Modules/_sqlite/connection.c.
level = ''
# 'isolation_level' is a misleading API.
# SQLite always runs at the SERIALIZABLE isolation level.
with self.wrap_database_errors:
self.connection.isolation_level = level
def check_constraints(self, table_names=None):
"""
Checks each table name in `table_names` for rows with invalid foreign key references. This method is
intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint checks were off.
Raises an IntegrityError on the first invalid foreign key reference encountered (if any) and provides
detailed information about the invalid reference in the error message.
Backends can override this method if they can more directly apply constraint checking (e.g. via "SET CONSTRAINTS
ALL IMMEDIATE").
"""
cursor = self.cursor()
if table_names is None:
table_names = self.introspection.table_names(cursor)
for table_name in table_names:
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor, table_name)
for column_name, referenced_table_name, referenced_column_name in key_columns:
cursor.execute("""
SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
LEFT JOIN `%s` as REFERRED
ON (REFERRING.`%s` = REFERRED.`%s`)
WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
% (primary_key_column_name, column_name, table_name, referenced_table_name,
column_name, referenced_column_name, column_name, referenced_column_name))
for bad_row in cursor.fetchall():
raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
"foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
% (table_name, bad_row[0], table_name, column_name, bad_row[1],
referenced_table_name, referenced_column_name))
def is_usable(self):
return True
def _start_transaction_under_autocommit(self):
"""
Start a transaction explicitly in autocommit mode.
Staying in autocommit mode works around a bug of sqlite3 that breaks
savepoints when autocommit is disabled.
"""
self.cursor().execute("BEGIN")
def schema_editor(self, *args, **kwargs):
"Returns a new instance of this backend's SchemaEditor"
return DatabaseSchemaEditor(self, *args, **kwargs)
FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')
class SQLiteCursorWrapper(Database.Cursor):
"""
Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
This fixes it -- but note that if you want to use a literal "%s" in a query,
you'll need to use "%%s".
"""
def execute(self, query, params=None):
if params is None:
return Database.Cursor.execute(self, query)
query = self.convert_query(query)
return Database.Cursor.execute(self, query, params)
def executemany(self, query, param_list):
query = self.convert_query(query)
return Database.Cursor.executemany(self, query, param_list)
def convert_query(self, query):
return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%')
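# Informal examples of convert_query():
#   "SELECT %s FROM t"                 -> "SELECT ? FROM t"
#   "SELECT strftime('%%Y', d) FROM t" -> "SELECT strftime('%Y', d) FROM t"
# The negative lookbehind in FORMAT_QMARK_REGEX keeps escaped "%%s" intact
# until the final replace turns "%%" back into "%".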
def _sqlite_date_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
else:
return getattr(dt, lookup_type)
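# Informal check: isoweekday() is 7 for a Sunday, so week_day becomes
# (7 % 7) + 1 == 1 -- Django's week_day lookup runs Sunday=1 .. Saturday=7.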
def _sqlite_date_trunc(lookup_type, dt):
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'year':
return "%i-01-01" % dt.year
elif lookup_type == 'month':
return "%i-%02i-01" % (dt.year, dt.month)
elif lookup_type == 'day':
return "%i-%02i-%02i" % (dt.year, dt.month, dt.day)
def _sqlite_datetime_extract(lookup_type, dt, tzname):
if dt is None:
return None
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if tzname is not None:
dt = timezone.localtime(dt, pytz.timezone(tzname))
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
else:
return getattr(dt, lookup_type)
def _sqlite_datetime_trunc(lookup_type, dt, tzname):
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if tzname is not None:
dt = timezone.localtime(dt, pytz.timezone(tzname))
if lookup_type == 'year':
return "%i-01-01 00:00:00" % dt.year
elif lookup_type == 'month':
return "%i-%02i-01 00:00:00" % (dt.year, dt.month)
elif lookup_type == 'day':
return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
elif lookup_type == 'hour':
return "%i-%02i-%02i %02i:00:00" % (dt.year, dt.month, dt.day, dt.hour)
elif lookup_type == 'minute':
return "%i-%02i-%02i %02i:%02i:00" % (dt.year, dt.month, dt.day, dt.hour, dt.minute)
elif lookup_type == 'second':
return "%i-%02i-%02i %02i:%02i:%02i" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
def _sqlite_format_dtdelta(dt, conn, days, secs, usecs):
try:
dt = backend_utils.typecast_timestamp(dt)
delta = datetime.timedelta(int(days), int(secs), int(usecs))
if conn.strip() == '+':
dt = dt + delta
else:
dt = dt - delta
except (ValueError, TypeError):
return None
# typecast_timestamp returns a date or a datetime without timezone.
# It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]"
return str(dt)
def _sqlite_regexp(re_pattern, re_string):
return bool(re.search(re_pattern, force_text(re_string))) if re_string is not None else False
def _sqlite_power(x, y):
return x ** y
| gpl-2.0 |
ProfessionalIT/professionalit-webiste | sdk/google_appengine/lib/django-1.3/django/contrib/admin/templatetags/admin_modify.py | 154 | 2389 | from django import template
register = template.Library()
def prepopulated_fields_js(context):
"""
Create a list of prepopulated_fields that should render JavaScript for
the prepopulated fields, covering both the admin form and inlines.
"""
prepopulated_fields = []
if context['add'] and 'adminform' in context:
prepopulated_fields.extend(context['adminform'].prepopulated_fields)
if 'inline_admin_formsets' in context:
for inline_admin_formset in context['inline_admin_formsets']:
for inline_admin_form in inline_admin_formset:
if inline_admin_form.original is None:
prepopulated_fields.extend(inline_admin_form.prepopulated_fields)
context.update({'prepopulated_fields': prepopulated_fields})
return context
prepopulated_fields_js = register.inclusion_tag('admin/prepopulated_fields_js.html', takes_context=True)(prepopulated_fields_js)
def submit_row(context):
"""
Displays the row of buttons for delete and save.
"""
opts = context['opts']
change = context['change']
is_popup = context['is_popup']
save_as = context['save_as']
return {
'onclick_attrib': (opts.get_ordered_objects() and change
and 'onclick="submitOrderForm();"' or ''),
'show_delete_link': (not is_popup and context['has_delete_permission']
and (change or context['show_delete'])),
'show_save_as_new': not is_popup and change and save_as,
'show_save_and_add_another': context['has_add_permission'] and
not is_popup and (not save_as or context['add']),
'show_save_and_continue': not is_popup and context['has_change_permission'],
'is_popup': is_popup,
'show_save': True
}
submit_row = register.inclusion_tag('admin/submit_line.html', takes_context=True)(submit_row)
def cell_count(inline_admin_form):
"""Returns the number of cells used in a tabular inline"""
count = 1 # Hidden cell with hidden 'id' field
for fieldset in inline_admin_form:
# Loop through all the fields (one per cell)
for line in fieldset:
for field in line:
count += 1
if inline_admin_form.formset.can_delete:
# Delete checkbox
count += 1
return count
cell_count = register.filter(cell_count)
| lgpl-3.0 |
gcvalderrama/Palantir | worker/helper.py | 1 | 2984 |
import os
import glob
import random
import shutil
class Helper:
def clean_file_name(self, source_folder, extension='txt'):
"""
Strip the '?ref_bajada' suffix from file names in source_folder.
:param source_folder: folder containing the files to rename
:param extension: file extension to match (default 'txt')
:return: None
"""
news = glob.glob(source_folder + "/*." + extension)
for news_file in news:
if "?ref_bajada" in news_file:
updated_news_file = news_file.replace('?ref_bajada', '')
os.rename(news_file, updated_news_file)
def remove_first_line(self, source_folder, extension):
"""
Remove the first line of text from every matching file.
:param source_folder: folder containing the files to edit
:param extension: file extension to match
:return: None
"""
files = glob.glob(source_folder + '/*.' + extension)
for file in files:
with open(file, 'r') as fin:
data = fin.read().splitlines(True)
with open(file, 'w') as fout:
fout.writelines(data[1:])
def rename_files(self, folder):
"""
Read all html files from the folder, strip the '?ref_bajada' suffix from
their names, and prefix files without a known category with 'policiales--'.
:param folder: source folder
:return: None
"""
news = glob.glob(folder + "/*.html")
for news_file in news:
if "?ref_bajada" in news_file:
updated_news_file = news_file.replace('?ref_bajada', '')
os.rename(news_file, updated_news_file)
news_file = updated_news_file
if news_file.startswith(folder + "/actualidad--") \
or news_file.startswith(folder + "/gastronomia--") \
or news_file.startswith(folder + "/policiales--") \
or news_file.startswith(folder + "/tecnologia--"):
continue
os.rename(news_file, news_file.replace(folder + "/", folder + "/policiales--"))
def distribute_files(self, source_folder, destination_folders, split_category, extension='txt'):
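"""
Copy a random sample of files from source_folder into each destination
folder, keeping an even mix of the two categories implied by
split_category. (Docstring added for clarity; behavior is unchanged.)
:param source_folder: folder containing the input files
:param destination_folders: dict mapping folder path -> number of items
:param split_category: substring that marks the first category
:param extension: file extension to match (default 'txt')
:return: None
"""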
news = glob.glob(source_folder + "/*." + extension)
news.sort()
first_category = []
second_category = []
for item in news:
if split_category in item:
first_category.append(item)
else:
second_category.append(item)
start_index = 0
random.shuffle(first_category)
random.shuffle(second_category)
for folder, number_items in destination_folders.items():
if not os.path.exists(folder):
os.makedirs(folder)
end_index = start_index + number_items
first_category_segment = first_category[start_index:end_index]
for news_file in first_category_segment:
shutil.copy2(news_file, folder)
second_category_segment = second_category[start_index:end_index]
for news_file in second_category_segment:
shutil.copy2(news_file, folder)
# Slices above are half-open ([start:end]), so the next segment must
# start at end_index; "end_index + 1" would silently skip one file.
start_index = end_index
| bsd-2-clause |
amiguez/youtube-dl | youtube_dl/extractor/chilloutzone.py | 169 | 3600 | from __future__ import unicode_literals
import re
import base64
import json
from .common import InfoExtractor
from ..utils import (
clean_html,
ExtractorError
)
class ChilloutzoneIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?chilloutzone\.net/video/(?P<id>[\w|-]+)\.html'
_TESTS = [{
'url': 'http://www.chilloutzone.net/video/enemene-meck-alle-katzen-weg.html',
'md5': 'a76f3457e813ea0037e5244f509e66d1',
'info_dict': {
'id': 'enemene-meck-alle-katzen-weg',
'ext': 'mp4',
'title': 'Enemene Meck - Alle Katzen weg',
'description': 'Ist das der Umkehrschluss des Niesenden Panda-Babys?',
},
}, {
'note': 'Video hosted at YouTube',
'url': 'http://www.chilloutzone.net/video/eine-sekunde-bevor.html',
'info_dict': {
'id': '1YVQaAgHyRU',
'ext': 'mp4',
'title': '16 Photos Taken 1 Second Before Disaster',
'description': 'md5:58a8fcf6a459fe0a08f54140f0ad1814',
'uploader': 'BuzzFeedVideo',
'uploader_id': 'BuzzFeedVideo',
'upload_date': '20131105',
},
}, {
'note': 'Video hosted at Vimeo',
'url': 'http://www.chilloutzone.net/video/icon-blending.html',
'md5': '2645c678b8dc4fefcc0e1b60db18dac1',
'info_dict': {
'id': '85523671',
'ext': 'mp4',
'title': 'The Sunday Times - Icons',
'description': 're:(?s)^Watch the making of - makingoficons.com.{300,}',
'uploader': 'Us',
'uploader_id': 'usfilms',
'upload_date': '20140131'
},
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
base64_video_info = self._html_search_regex(
r'var cozVidData = "(.+?)";', webpage, 'video data')
decoded_video_info = base64.b64decode(base64_video_info.encode('utf-8')).decode('utf-8')
video_info_dict = json.loads(decoded_video_info)
# get video information from dict
video_url = video_info_dict['mediaUrl']
description = clean_html(video_info_dict.get('description'))
title = video_info_dict['title']
native_platform = video_info_dict['nativePlatform']
native_video_id = video_info_dict['nativeVideoId']
source_priority = video_info_dict['sourcePriority']
# If nativePlatform is None a fallback mechanism is used (i.e. youtube embed)
if native_platform is None:
youtube_url = self._html_search_regex(
r'<iframe.* src="((?:https?:)?//(?:[^.]+\.)?youtube\.com/.+?)"',
webpage, 'fallback video URL', default=None)
if youtube_url is not None:
return self.url_result(youtube_url, ie='Youtube')
# Non Fallback: Decide to use native source (e.g. youtube or vimeo) or
# the own CDN
if source_priority == 'native':
if native_platform == 'youtube':
return self.url_result(native_video_id, ie='Youtube')
if native_platform == 'vimeo':
return self.url_result(
'http://vimeo.com/' + native_video_id, ie='Vimeo')
if not video_url:
raise ExtractorError('No video found')
return {
'id': video_id,
'url': video_url,
'ext': 'mp4',
'title': title,
'description': description,
}
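# Hedged illustration (not from youtube-dl): the cozVidData payload decoded
# in _real_extract above is base64-encoded JSON, e.g.:
#
#     import base64, json
#     blob = base64.b64encode(json.dumps(
#         {'mediaUrl': 'http://example.com/v.mp4', 'title': 'Demo',
#          'nativePlatform': None, 'nativeVideoId': None,
#          'sourcePriority': 'native'}).encode('utf-8'))
#     json.loads(base64.b64decode(blob).decode('utf-8'))['mediaUrl']
#     # -> 'http://example.com/v.mp4'
# The URL and field values here are hypothetical.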
| unlicense |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/django/core/management/commands/dumpdata.py | 72 | 8692 | import warnings
from collections import OrderedDict
from django.apps import apps
from django.core import serializers
from django.core.management.base import BaseCommand, CommandError
from django.core.management.utils import parse_apps_and_model_labels
from django.db import DEFAULT_DB_ALIAS, router
class ProxyModelWarning(Warning):
pass
class Command(BaseCommand):
help = (
"Output the contents of the database as a fixture of the given format "
"(using each model's default manager unless --all is specified)."
)
def add_arguments(self, parser):
parser.add_argument(
'args', metavar='app_label[.ModelName]', nargs='*',
help='Restricts dumped data to the specified app_label or app_label.ModelName.',
)
parser.add_argument(
'--format', default='json', dest='format',
help='Specifies the output serialization format for fixtures.',
)
parser.add_argument(
'--indent', default=None, dest='indent', type=int,
help='Specifies the indent level to use when pretty-printing output.',
)
parser.add_argument(
'--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS,
help='Nominates a specific database to dump fixtures from. '
'Defaults to the "default" database.',
)
parser.add_argument(
'-e', '--exclude', dest='exclude', action='append', default=[],
help='An app_label or app_label.ModelName to exclude '
'(use multiple --exclude to exclude multiple apps/models).',
)
parser.add_argument(
'--natural-foreign', action='store_true', dest='use_natural_foreign_keys', default=False,
help='Use natural foreign keys if they are available.',
)
parser.add_argument(
'--natural-primary', action='store_true', dest='use_natural_primary_keys', default=False,
help='Use natural primary keys if they are available.',
)
parser.add_argument(
'-a', '--all', action='store_true', dest='use_base_manager', default=False,
help="Use Django's base manager to dump all models stored in the database, "
"including those that would otherwise be filtered or modified by a custom manager.",
)
parser.add_argument(
'--pks', dest='primary_keys',
help="Only dump objects with given primary keys. Accepts a comma-separated "
"list of keys. This option only works when you specify one model.",
)
parser.add_argument(
'-o', '--output', default=None, dest='output',
help='Specifies file to which the output is written.'
)
def handle(self, *app_labels, **options):
format = options['format']
indent = options['indent']
using = options['database']
excludes = options['exclude']
output = options['output']
show_traceback = options['traceback']
use_natural_foreign_keys = options['use_natural_foreign_keys']
use_natural_primary_keys = options['use_natural_primary_keys']
use_base_manager = options['use_base_manager']
pks = options['primary_keys']
if pks:
primary_keys = [pk.strip() for pk in pks.split(',')]
else:
primary_keys = []
excluded_models, excluded_apps = parse_apps_and_model_labels(excludes)
if len(app_labels) == 0:
if primary_keys:
raise CommandError("You can only use --pks option with one model")
app_list = OrderedDict(
(app_config, None) for app_config in apps.get_app_configs()
if app_config.models_module is not None and app_config not in excluded_apps
)
else:
if len(app_labels) > 1 and primary_keys:
raise CommandError("You can only use --pks option with one model")
app_list = OrderedDict()
for label in app_labels:
try:
app_label, model_label = label.split('.')
try:
app_config = apps.get_app_config(app_label)
except LookupError as e:
raise CommandError(str(e))
if app_config.models_module is None or app_config in excluded_apps:
continue
try:
model = app_config.get_model(model_label)
except LookupError:
raise CommandError("Unknown model: %s.%s" % (app_label, model_label))
app_list_value = app_list.setdefault(app_config, [])
# We may have previously seen a "all-models" request for
# this app (no model qualifier was given). In this case
# there is no need adding specific models to the list.
if app_list_value is not None:
if model not in app_list_value:
app_list_value.append(model)
except ValueError:
if primary_keys:
raise CommandError("You can only use --pks option with one model")
# This is just an app - no model qualifier
app_label = label
try:
app_config = apps.get_app_config(app_label)
except LookupError as e:
raise CommandError(str(e))
if app_config.models_module is None or app_config in excluded_apps:
continue
app_list[app_config] = None
# Check that the serialization format exists; this is a shortcut to
# avoid collating all the objects and _then_ failing.
if format not in serializers.get_public_serializer_formats():
try:
serializers.get_serializer(format)
except serializers.SerializerDoesNotExist:
pass
raise CommandError("Unknown serialization format: %s" % format)
def get_objects(count_only=False):
"""
Collate the objects to be serialized. If count_only is True, just
count the number of objects to be serialized.
"""
models = serializers.sort_dependencies(app_list.items())
for model in models:
if model in excluded_models:
continue
if model._meta.proxy and model._meta.proxy_for_model not in models:
warnings.warn(
"%s is a proxy model and won't be serialized." % model._meta.label,
category=ProxyModelWarning,
)
if not model._meta.proxy and router.allow_migrate_model(using, model):
if use_base_manager:
objects = model._base_manager
else:
objects = model._default_manager
queryset = objects.using(using).order_by(model._meta.pk.name)
if primary_keys:
queryset = queryset.filter(pk__in=primary_keys)
if count_only:
yield queryset.order_by().count()
else:
for obj in queryset.iterator():
yield obj
try:
self.stdout.ending = None
progress_output = None
object_count = 0
# If dumpdata is outputting to stdout, there is no way to display progress
if (output and self.stdout.isatty() and options['verbosity'] > 0):
progress_output = self.stdout
object_count = sum(get_objects(count_only=True))
stream = open(output, 'w') if output else None
try:
serializers.serialize(
format, get_objects(), indent=indent,
use_natural_foreign_keys=use_natural_foreign_keys,
use_natural_primary_keys=use_natural_primary_keys,
stream=stream or self.stdout, progress_output=progress_output,
object_count=object_count,
)
finally:
if stream:
stream.close()
except Exception as e:
if show_traceback:
raise
raise CommandError("Unable to serialize database: %s" % e)
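# Typical invocations (illustrative), using only options defined above;
# 'myapp', 'BigTable' and 'Entry' are hypothetical labels:
#   python manage.py dumpdata auth.User --indent 2 --output users.json
#   python manage.py dumpdata myapp --exclude myapp.BigTable --natural-foreign
#   python manage.py dumpdata myapp.Entry --pks 1,2,3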
| gpl-3.0 |
hustcalm/seabios-hacking | tools/layoutrom.py | 23 | 23765 | #!/usr/bin/env python
# Script to analyze code and arrange ld sections.
#
# Copyright (C) 2008-2010 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import sys
# LD script headers/trailers
COMMONHEADER = """
/* DO NOT EDIT! This is an autogenerated file. See tools/layoutrom.py. */
OUTPUT_FORMAT("elf32-i386")
OUTPUT_ARCH("i386")
SECTIONS
{
"""
COMMONTRAILER = """
/* Discard regular data sections to force a link error if
* code attempts to access data not marked with VAR16 (or other
* appropriate macro)
*/
/DISCARD/ : {
*(.text*) *(.data*) *(.bss*) *(.rodata*)
*(COMMON) *(.discard*) *(.eh_frame) *(.note*)
}
}
"""
######################################################################
# Determine section locations
######################################################################
# Align 'pos' to 'alignbytes' offset
def alignpos(pos, alignbytes):
mask = alignbytes - 1
return (pos + mask) & ~mask
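# Examples (informal): alignpos(0x1001, 0x10) == 0x1010, while
# alignpos(0x1000, 0x10) == 0x1000 -- already aligned values are unchanged.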
# Determine the final addresses for a list of sections that end at an
# address.
def setSectionsStart(sections, endaddr, minalign=1, segoffset=0):
totspace = 0
for section in sections:
if section.align > minalign:
minalign = section.align
totspace = alignpos(totspace, section.align) + section.size
startaddr = (endaddr - totspace) / minalign * minalign
curaddr = startaddr
for section in sections:
curaddr = alignpos(curaddr, section.align)
section.finalloc = curaddr
section.finalsegloc = curaddr - segoffset
curaddr += section.size
return startaddr, minalign
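# Informal example: two sections of size 0x30 and 0x20, both align 16,
# ending at 0x10000 -> totspace == 0x50 and
# startaddr == (0x10000 - 0x50) / 16 * 16 == 0xffb0.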
# The 16bit code can't exceed 64K of space.
BUILD_BIOS_ADDR = 0xf0000
BUILD_BIOS_SIZE = 0x10000
BUILD_ROM_START = 0xc0000
# Lay out the 16bit code. This ensures sections with fixed offset
# requirements are placed in the correct location. It also places the
# 16bit code as high as possible in the f-segment.
def fitSections(sections, fillsections):
# fixedsections = [(addr, section), ...]
fixedsections = []
for section in sections:
if section.name.startswith('.fixedaddr.'):
addr = int(section.name[11:], 16)
section.finalloc = addr + BUILD_BIOS_ADDR
section.finalsegloc = addr
fixedsections.append((addr, section))
if section.align != 1:
print "Error: Fixed section %s has non-zero alignment (%d)" % (
section.name, section.align)
sys.exit(1)
fixedsections.sort()
firstfixed = fixedsections[0][0]
# Find freespace in fixed address area
# fixedAddr = [(freespace, section), ...]
fixedAddr = []
for i in range(len(fixedsections)):
fixedsectioninfo = fixedsections[i]
addr, section = fixedsectioninfo
if i == len(fixedsections) - 1:
nextaddr = BUILD_BIOS_SIZE
else:
nextaddr = fixedsections[i+1][0]
avail = nextaddr - addr - section.size
fixedAddr.append((avail, section))
fixedAddr.sort()
# Attempt to fit other sections into fixed area
canrelocate = [(section.size, section.align, section.name, section)
for section in fillsections]
canrelocate.sort()
canrelocate = [section for size, align, name, section in canrelocate]
totalused = 0
for freespace, fixedsection in fixedAddr:
addpos = fixedsection.finalsegloc + fixedsection.size
totalused += fixedsection.size
nextfixedaddr = addpos + freespace
# print "Filling section %x uses %d, next=%x, available=%d" % (
# fixedsection.finalloc, fixedsection.size, nextfixedaddr, freespace)
while 1:
canfit = None
for fitsection in canrelocate:
if addpos + fitsection.size > nextfixedaddr:
# Can't fit and nothing else will fit.
break
fitnextaddr = alignpos(addpos, fitsection.align) + fitsection.size
# print "Test %s - %x vs %x" % (
# fitsection.name, fitnextaddr, nextfixedaddr)
if fitnextaddr > nextfixedaddr:
# This item can't fit.
continue
canfit = (fitnextaddr, fitsection)
if canfit is None:
break
# Found a section that can fit.
fitnextaddr, fitsection = canfit
canrelocate.remove(fitsection)
fitsection.finalloc = addpos + BUILD_BIOS_ADDR
fitsection.finalsegloc = addpos
addpos = fitnextaddr
totalused += fitsection.size
# print " Adding %s (size %d align %d) pos=%x avail=%d" % (
# fitsection[2], fitsection[0], fitsection[1]
# , fitnextaddr, nextfixedaddr - fitnextaddr)
# Report stats
total = BUILD_BIOS_SIZE-firstfixed
slack = total - totalused
print ("Fixed space: 0x%x-0x%x total: %d slack: %d"
" Percent slack: %.1f%%" % (
firstfixed, BUILD_BIOS_SIZE, total, slack,
(float(slack) / total) * 100.0))
return firstfixed + BUILD_BIOS_ADDR
# Return the subset of sections with a given category
def getSectionsCategory(sections, category):
return [section for section in sections if section.category == category]
# Return the subset of sections with a given name prefix
def getSectionsPrefix(sections, prefix):
return [section for section in sections
if section.name.startswith(prefix)]
# The sections (and associated information) to be placed in output rom
class LayoutInfo:
sections16 = sec16_start = sec16_align = None
sections32seg = sec32seg_start = sec32seg_align = None
sections32flat = sec32flat_start = sec32flat_align = None
sections32init = sec32init_start = sec32init_align = None
sections32low = sec32low_start = sec32low_align = None
datalow_base = final_sec32low_start = None
# Determine final memory addresses for sections
def doLayout(sections, genreloc):
li = LayoutInfo()
# Determine 16bit positions
li.sections16 = getSectionsCategory(sections, '16')
textsections = getSectionsPrefix(li.sections16, '.text.')
rodatasections = (
getSectionsPrefix(li.sections16, '.rodata.str1.1')
+ getSectionsPrefix(li.sections16, '.rodata.__func__.')
+ getSectionsPrefix(li.sections16, '.rodata.__PRETTY_FUNCTION__.'))
datasections = getSectionsPrefix(li.sections16, '.data16.')
fixedsections = getSectionsPrefix(li.sections16, '.fixedaddr.')
firstfixed = fitSections(fixedsections, textsections)
remsections = [s for s in textsections+rodatasections+datasections
if s.finalloc is None]
li.sec16_start, li.sec16_align = setSectionsStart(
remsections, firstfixed, segoffset=BUILD_BIOS_ADDR)
# Determine 32seg positions
li.sections32seg = getSectionsCategory(sections, '32seg')
textsections = getSectionsPrefix(li.sections32seg, '.text.')
rodatasections = (
getSectionsPrefix(li.sections32seg, '.rodata.str1.1')
+ getSectionsPrefix(li.sections32seg, '.rodata.__func__.')
+ getSectionsPrefix(li.sections32seg, '.rodata.__PRETTY_FUNCTION__.'))
datasections = getSectionsPrefix(li.sections32seg, '.data32seg.')
li.sec32seg_start, li.sec32seg_align = setSectionsStart(
textsections + rodatasections + datasections, li.sec16_start
, segoffset=BUILD_BIOS_ADDR)
# Determine 32flat runtime positions
li.sections32flat = getSectionsCategory(sections, '32flat')
textsections = getSectionsPrefix(li.sections32flat, '.text.')
rodatasections = getSectionsPrefix(li.sections32flat, '.rodata')
datasections = getSectionsPrefix(li.sections32flat, '.data.')
bsssections = getSectionsPrefix(li.sections32flat, '.bss.')
li.sec32flat_start, li.sec32flat_align = setSectionsStart(
textsections + rodatasections + datasections + bsssections
, li.sec32seg_start, 16)
# Determine 32flat init positions
li.sections32init = getSectionsCategory(sections, '32init')
textsections = getSectionsPrefix(li.sections32init, '.text.')
rodatasections = getSectionsPrefix(li.sections32init, '.rodata')
datasections = getSectionsPrefix(li.sections32init, '.data.')
bsssections = getSectionsPrefix(li.sections32init, '.bss.')
li.sec32init_start, li.sec32init_align = setSectionsStart(
textsections + rodatasections + datasections + bsssections
, li.sec32flat_start, 16)
# Determine "low memory" data positions
li.sections32low = getSectionsCategory(sections, '32low')
if genreloc:
sec32low_top = li.sec32init_start
final_sec32low_top = min(BUILD_BIOS_ADDR, li.sec32flat_start)
else:
sec32low_top = min(BUILD_BIOS_ADDR, li.sec32init_start)
final_sec32low_top = sec32low_top
relocdelta = final_sec32low_top - sec32low_top
datalow_base = final_sec32low_top - 64*1024
li.datalow_base = max(BUILD_ROM_START, alignpos(datalow_base, 2*1024))
li.sec32low_start, li.sec32low_align = setSectionsStart(
li.sections32low, sec32low_top, 16
, segoffset=li.datalow_base - relocdelta)
li.final_sec32low_start = li.sec32low_start + relocdelta
# Print statistics
size16 = BUILD_BIOS_ADDR + BUILD_BIOS_SIZE - li.sec16_start
size32seg = li.sec16_start - li.sec32seg_start
size32flat = li.sec32seg_start - li.sec32flat_start
size32init = li.sec32flat_start - li.sec32init_start
sizelow = sec32low_top - li.sec32low_start
print "16bit size: %d" % size16
print "32bit segmented size: %d" % size32seg
print "32bit flat size: %d" % size32flat
print "32bit flat init size: %d" % size32init
print "Lowmem size: %d" % sizelow
return li
######################################################################
# Linker script output
######################################################################
# Write LD script includes for the given cross references
def outXRefs(sections, useseg=0):
xrefs = {}
out = ""
for section in sections:
for reloc in section.relocs:
symbol = reloc.symbol
if (symbol.section is None
or (symbol.section.fileid == section.fileid
and symbol.name == reloc.symbolname)
or reloc.symbolname in xrefs):
continue
xrefs[reloc.symbolname] = 1
loc = symbol.section.finalloc
if useseg:
loc = symbol.section.finalsegloc
out += "%s = 0x%x ;\n" % (reloc.symbolname, loc + symbol.offset)
return out
# Write LD script includes for the given sections using relative offsets
def outRelSections(sections, startsym, useseg=0):
sections = [(section.finalloc, section) for section in sections
if section.finalloc is not None]
sections.sort()
out = ""
for addr, section in sections:
loc = section.finalloc
if useseg:
loc = section.finalsegloc
out += ". = ( 0x%x - %s ) ;\n" % (loc, startsym)
if section.name == '.rodata.str1.1':
out += "_rodata = . ;\n"
out += "*(%s)\n" % (section.name,)
return out
# Build linker script output for a list of relocations.
def strRelocs(outname, outrel, relocs):
relocs.sort()
return (" %s_start = ABSOLUTE(.) ;\n" % (outname,)
+ "".join(["LONG(0x%x - %s)\n" % (pos, outrel)
for pos in relocs])
+ " %s_end = ABSOLUTE(.) ;\n" % (outname,))
# Find all relocations in the given sections with the given attributes
def getRelocs(sections, type=None, category=None, notcategory=None):
out = []
for section in sections:
for reloc in section.relocs:
if reloc.symbol.section is None:
continue
destcategory = reloc.symbol.section.category
if ((type is None or reloc.type == type)
and (category is None or destcategory == category)
and (notcategory is None or destcategory != notcategory)):
out.append(section.finalloc + reloc.offset)
return out
# Return the start address and minimum alignment for a set of sections
def getSectionsStart(sections, defaddr=0):
return min([section.finalloc for section in sections
if section.finalloc is not None] or [defaddr])
# Output the linker scripts for all required sections.
def writeLinkerScripts(li, entrysym, genreloc, out16, out32seg, out32flat):
# Write 16bit linker script
out = outXRefs(li.sections16, useseg=1) + """
datalow_base = 0x%x ;
_datalow_seg = 0x%x ;
code16_start = 0x%x ;
.text16 code16_start : {
%s
}
""" % (li.datalow_base,
li.datalow_base / 16,
li.sec16_start - BUILD_BIOS_ADDR,
outRelSections(li.sections16, 'code16_start', useseg=1))
outfile = open(out16, 'wb')
outfile.write(COMMONHEADER + out + COMMONTRAILER)
outfile.close()
# Write 32seg linker script
out = outXRefs(li.sections32seg, useseg=1) + """
code32seg_start = 0x%x ;
.text32seg code32seg_start : {
%s
}
""" % (li.sec32seg_start - BUILD_BIOS_ADDR,
outRelSections(li.sections32seg, 'code32seg_start', useseg=1))
outfile = open(out32seg, 'wb')
outfile.write(COMMONHEADER + out + COMMONTRAILER)
outfile.close()
# Write 32flat linker script
sections32all = li.sections32flat + li.sections32init + li.sections32low
sec32all_start = li.sec32low_start
entrysympos = entrysym.section.finalloc + entrysym.offset
relocstr = ""
if genreloc:
# Generate relocations
absrelocs = getRelocs(
li.sections32init, type='R_386_32', category='32init')
relrelocs = getRelocs(
li.sections32init, type='R_386_PC32', notcategory='32init')
initrelocs = getRelocs(
li.sections32flat + li.sections32low + li.sections16
+ li.sections32seg, category='32init')
lowrelocs = getRelocs(sections32all, category='32low')
relocstr = (strRelocs("_reloc_abs", "code32init_start", absrelocs)
+ strRelocs("_reloc_rel", "code32init_start", relrelocs)
+ strRelocs("_reloc_init", "code32flat_start", initrelocs)
+ strRelocs("_reloc_datalow", "code32flat_start", lowrelocs))
numrelocs = len(absrelocs + relrelocs + initrelocs + lowrelocs)
sec32all_start -= numrelocs * 4
out = outXRefs(sections32all) + """
%s = 0x%x ;
_reloc_min_align = 0x%x ;
datalow_base = 0x%x ;
final_datalow_start = 0x%x ;
code32flat_start = 0x%x ;
.text code32flat_start : {
%s
datalow_start = ABSOLUTE(.) ;
%s
datalow_end = ABSOLUTE(.) ;
code32init_start = ABSOLUTE(.) ;
%s
code32init_end = ABSOLUTE(.) ;
%s
. = ( 0x%x - code32flat_start ) ;
*(.text32seg)
. = ( 0x%x - code32flat_start ) ;
*(.text16)
code32flat_end = ABSOLUTE(.) ;
} :text
""" % (entrysym.name, entrysympos,
li.sec32init_align,
li.datalow_base,
li.final_sec32low_start,
sec32all_start,
relocstr,
outRelSections(li.sections32low, 'code32flat_start'),
outRelSections(li.sections32init, 'code32flat_start'),
outRelSections(li.sections32flat, 'code32flat_start'),
li.sec32seg_start,
li.sec16_start)
out = COMMONHEADER + out + COMMONTRAILER + """
ENTRY(%s)
PHDRS
{
text PT_LOAD AT ( code32flat_start ) ;
}
""" % (entrysym.name,)
outfile = open(out32flat, 'wb')
outfile.write(out)
outfile.close()
######################################################################
# Detection of init code
######################################################################
def markRuntime(section, sections):
if (section is None or not section.keep or section.category is not None
or '.init.' in section.name or section.fileid != '32flat'):
return
section.category = '32flat'
# Recursively mark all sections this section points to
for reloc in section.relocs:
markRuntime(reloc.symbol.section, sections)
def findInit(sections):
# Recursively find and mark all "runtime" sections.
for section in sections:
if ('.datalow.' in section.name or '.runtime.' in section.name
or '.export.' in section.name):
markRuntime(section, sections)
for section in sections:
if section.category is not None:
continue
if section.fileid == '32flat':
section.category = '32init'
else:
section.category = section.fileid
######################################################################
# Section garbage collection
######################################################################
CFUNCPREFIX = [('_cfunc16_', 0), ('_cfunc32seg_', 1), ('_cfunc32flat_', 2)]
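# For example, a relocation against a hypothetical symbol such as
# "_cfunc32flat_handle_foo" may only be resolved from the 32flat symbol
# table (pos 2); keepsymbol() strips the prefix before looking it up.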
# Find and keep the section associated with a symbol (if available).
def keepsymbol(reloc, infos, pos, isxref):
symbolname = reloc.symbolname
mustbecfunc = 0
for symprefix, needpos in CFUNCPREFIX:
if symbolname.startswith(symprefix):
if needpos != pos:
return -1
symbolname = symbolname[len(symprefix):]
mustbecfunc = 1
break
symbol = infos[pos][1].get(symbolname)
if (symbol is None or symbol.section is None
or symbol.section.name.startswith('.discard.')):
return -1
isdestcfunc = (symbol.section.name.startswith('.text.')
and not symbol.section.name.startswith('.text.asm.'))
if ((mustbecfunc and not isdestcfunc)
or (not mustbecfunc and isdestcfunc and isxref)):
return -1
reloc.symbol = symbol
keepsection(symbol.section, infos, pos)
return 0
# Note required section, and recursively set all referenced sections
# as required.
def keepsection(section, infos, pos=0):
if section.keep:
# Already kept - nothing to do.
return
section.keep = 1
# Keep all sections that this section points to
for reloc in section.relocs:
ret = keepsymbol(reloc, infos, pos, 0)
if not ret:
continue
# Not in primary sections - it may be a cross 16/32 reference
ret = keepsymbol(reloc, infos, (pos+1)%3, 1)
if not ret:
continue
ret = keepsymbol(reloc, infos, (pos+2)%3, 1)
if not ret:
continue
# Determine which sections are actually referenced and need to be
# placed into the output file.
def gc(info16, info32seg, info32flat):
# infos = ((sections16, symbols16), (sect32seg, sym32seg)
# , (sect32flat, sym32flat))
infos = (info16, info32seg, info32flat)
# Start by keeping sections that are globally visible.
for section in info16[0]:
if section.name.startswith('.fixedaddr.') or '.export.' in section.name:
keepsection(section, infos)
return [section for section in info16[0]+info32seg[0]+info32flat[0]
if section.keep]
######################################################################
# Startup and input parsing
######################################################################
class Section:
name = size = alignment = fileid = relocs = None
finalloc = finalsegloc = category = keep = None
class Reloc:
offset = type = symbolname = symbol = None
class Symbol:
name = offset = section = None
# Read in output from objdump
def parseObjDump(file, fileid):
# sections = [section, ...]
sections = []
sectionmap = {}
# symbols[symbolname] = symbol
symbols = {}
state = None
for line in file.readlines():
line = line.rstrip()
if line == 'Sections:':
state = 'section'
continue
if line == 'SYMBOL TABLE:':
state = 'symbol'
continue
if line.startswith('RELOCATION RECORDS FOR ['):
sectionname = line[24:-2]
if sectionname.startswith('.debug_'):
# Skip debugging sections (to reduce parsing time)
state = None
continue
state = 'reloc'
relocsection = sectionmap[sectionname]
continue
if state == 'section':
try:
idx, name, size, vma, lma, fileoff, align = line.split()
if align[:3] != '2**':
continue
section = Section()
section.name = name
section.size = int(size, 16)
section.align = 2**int(align[3:])
section.fileid = fileid
section.relocs = []
sections.append(section)
sectionmap[name] = section
except ValueError:
pass
continue
if state == 'symbol':
try:
parts = line[17:].split()
if len(parts) == 3:
sectionname, size, name = parts
elif len(parts) == 4 and parts[2] == '.hidden':
sectionname, size, hidden, name = parts
else:
continue
symbol = Symbol()
symbol.size = int(size, 16)
symbol.offset = int(line[:8], 16)
symbol.name = name
symbol.section = sectionmap.get(sectionname)
symbols[name] = symbol
except ValueError:
pass
continue
if state == 'reloc':
try:
off, type, symbolname = line.split()
reloc = Reloc()
reloc.offset = int(off, 16)
reloc.type = type
reloc.symbolname = symbolname
reloc.symbol = symbols.get(symbolname)
if reloc.symbol is None:
# Some binutils (2.20.1) give section name instead
# of a symbol - create a dummy symbol.
reloc.symbol = symbol = Symbol()
symbol.size = 0
symbol.offset = 0
symbol.name = symbolname
symbol.section = sectionmap.get(symbolname)
symbols[symbolname] = symbol
relocsection.relocs.append(reloc)
except ValueError:
pass
return sections, symbols
def main():
# Get output name
in16, in32seg, in32flat, out16, out32seg, out32flat = sys.argv[1:]
# Read in the objdump information
infile16 = open(in16, 'rb')
infile32seg = open(in32seg, 'rb')
infile32flat = open(in32flat, 'rb')
# infoX = (sections, symbols)
info16 = parseObjDump(infile16, '16')
info32seg = parseObjDump(infile32seg, '32seg')
info32flat = parseObjDump(infile32flat, '32flat')
# Figure out which sections to keep.
sections = gc(info16, info32seg, info32flat)
# Separate 32bit flat into runtime and init parts
findInit(sections)
# Note "low memory" parts
for section in getSectionsPrefix(sections, '.datalow.'):
section.category = '32low'
# Determine the final memory locations of each kept section.
genreloc = '_reloc_abs_start' in info32flat[1]
li = doLayout(sections, genreloc)
# Write out linker script files.
entrysym = info16[1]['entry_elf']
writeLinkerScripts(li, entrysym, genreloc, out16, out32seg, out32flat)
if __name__ == '__main__':
main()
| gpl-3.0 |
Acidburn0zzz/servo | tests/wpt/web-platform-tests/tools/third_party/pytest/testing/test_runner_xunit.py | 32 | 9084 | """
Test correct setup/teardown at module, class, and instance level.
"""
from __future__ import absolute_import, division, print_function
import pytest
def test_module_and_function_setup(testdir):
reprec = testdir.inline_runsource(
"""
modlevel = []
def setup_module(module):
assert not modlevel
module.modlevel.append(42)
def teardown_module(module):
modlevel.pop()
def setup_function(function):
function.answer = 17
def teardown_function(function):
del function.answer
def test_modlevel():
assert modlevel[0] == 42
assert test_modlevel.answer == 17
class TestFromClass(object):
def test_module(self):
assert modlevel[0] == 42
assert not hasattr(test_modlevel, 'answer')
"""
)
rep = reprec.matchreport("test_modlevel")
assert rep.passed
rep = reprec.matchreport("test_module")
assert rep.passed
def test_module_setup_failure_no_teardown(testdir):
reprec = testdir.inline_runsource(
"""
values = []
def setup_module(module):
values.append(1)
0/0
def test_nothing():
pass
def teardown_module(module):
values.append(2)
"""
)
reprec.assertoutcome(failed=1)
calls = reprec.getcalls("pytest_runtest_setup")
assert calls[0].item.module.values == [1]
def test_setup_function_failure_no_teardown(testdir):
reprec = testdir.inline_runsource(
"""
modlevel = []
def setup_function(function):
modlevel.append(1)
0/0
def teardown_function(module):
modlevel.append(2)
def test_func():
pass
"""
)
calls = reprec.getcalls("pytest_runtest_setup")
assert calls[0].item.module.modlevel == [1]
def test_class_setup(testdir):
reprec = testdir.inline_runsource(
"""
class TestSimpleClassSetup(object):
clslevel = []
def setup_class(cls):
cls.clslevel.append(23)
def teardown_class(cls):
cls.clslevel.pop()
def test_classlevel(self):
assert self.clslevel[0] == 23
class TestInheritedClassSetupStillWorks(TestSimpleClassSetup):
def test_classlevel_anothertime(self):
assert self.clslevel == [23]
def test_cleanup():
assert not TestSimpleClassSetup.clslevel
assert not TestInheritedClassSetupStillWorks.clslevel
"""
)
reprec.assertoutcome(passed=1 + 2 + 1)
def test_class_setup_failure_no_teardown(testdir):
reprec = testdir.inline_runsource(
"""
class TestSimpleClassSetup(object):
clslevel = []
def setup_class(cls):
0/0
def teardown_class(cls):
cls.clslevel.append(1)
def test_classlevel(self):
pass
def test_cleanup():
assert not TestSimpleClassSetup.clslevel
"""
)
reprec.assertoutcome(failed=1, passed=1)
def test_method_setup(testdir):
reprec = testdir.inline_runsource(
"""
class TestSetupMethod(object):
def setup_method(self, meth):
self.methsetup = meth
def teardown_method(self, meth):
del self.methsetup
def test_some(self):
assert self.methsetup == self.test_some
def test_other(self):
assert self.methsetup == self.test_other
"""
)
reprec.assertoutcome(passed=2)
def test_method_setup_failure_no_teardown(testdir):
reprec = testdir.inline_runsource(
"""
class TestMethodSetup(object):
clslevel = []
def setup_method(self, method):
self.clslevel.append(1)
0/0
def teardown_method(self, method):
self.clslevel.append(2)
def test_method(self):
pass
def test_cleanup():
assert TestMethodSetup.clslevel == [1]
"""
)
reprec.assertoutcome(failed=1, passed=1)
def test_method_generator_setup(testdir):
reprec = testdir.inline_runsource(
"""
class TestSetupTeardownOnInstance(object):
def setup_class(cls):
cls.classsetup = True
def setup_method(self, method):
self.methsetup = method
def test_generate(self):
assert self.classsetup
assert self.methsetup == self.test_generate
yield self.generated, 5
yield self.generated, 2
def generated(self, value):
assert self.classsetup
assert self.methsetup == self.test_generate
assert value == 5
"""
)
reprec.assertoutcome(passed=1, failed=1)
def test_func_generator_setup(testdir):
reprec = testdir.inline_runsource(
"""
import sys
def setup_module(mod):
print ("setup_module")
mod.x = []
def setup_function(fun):
print ("setup_function")
x.append(1)
def teardown_function(fun):
print ("teardown_function")
x.pop()
def test_one():
assert x == [1]
def check():
print ("check")
sys.stderr.write("e\\n")
assert x == [1]
yield check
assert x == [1]
"""
)
rep = reprec.matchreport("test_one", names="pytest_runtest_logreport")
assert rep.passed
def test_method_setup_uses_fresh_instances(testdir):
reprec = testdir.inline_runsource(
"""
class TestSelfState1(object):
memory = []
def test_hello(self):
self.memory.append(self)
def test_afterhello(self):
assert self != self.memory[0]
"""
)
reprec.assertoutcome(passed=2, failed=0)
def test_setup_that_skips_calledagain(testdir):
p = testdir.makepyfile(
"""
import pytest
def setup_module(mod):
pytest.skip("x")
def test_function1():
pass
def test_function2():
pass
"""
)
reprec = testdir.inline_run(p)
reprec.assertoutcome(skipped=2)
def test_setup_fails_again_on_all_tests(testdir):
p = testdir.makepyfile(
"""
import pytest
def setup_module(mod):
raise ValueError(42)
def test_function1():
pass
def test_function2():
pass
"""
)
reprec = testdir.inline_run(p)
reprec.assertoutcome(failed=2)
def test_setup_funcarg_setup_when_outer_scope_fails(testdir):
p = testdir.makepyfile(
"""
import pytest
def setup_module(mod):
raise ValueError(42)
@pytest.fixture
def hello(request):
raise ValueError("xyz43")
def test_function1(hello):
pass
def test_function2(hello):
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*function1*",
"*ValueError*42*",
"*function2*",
"*ValueError*42*",
"*2 error*",
]
)
assert "xyz43" not in result.stdout.str()
@pytest.mark.parametrize("arg", ["", "arg"])
def test_setup_teardown_function_level_with_optional_argument(
testdir, monkeypatch, arg
):
"""parameter to setup/teardown xunit-style functions parameter is now optional (#1728)."""
import sys
trace_setups_teardowns = []
monkeypatch.setattr(
sys, "trace_setups_teardowns", trace_setups_teardowns, raising=False
)
p = testdir.makepyfile(
"""
import pytest
import sys
trace = sys.trace_setups_teardowns.append
def setup_module({arg}): trace('setup_module')
def teardown_module({arg}): trace('teardown_module')
def setup_function({arg}): trace('setup_function')
def teardown_function({arg}): trace('teardown_function')
def test_function_1(): pass
def test_function_2(): pass
class Test(object):
def setup_method(self, {arg}): trace('setup_method')
def teardown_method(self, {arg}): trace('teardown_method')
def test_method_1(self): pass
def test_method_2(self): pass
""".format(
arg=arg
)
)
result = testdir.inline_run(p)
result.assertoutcome(passed=4)
expected = [
"setup_module",
"setup_function",
"teardown_function",
"setup_function",
"teardown_function",
"setup_method",
"teardown_method",
"setup_method",
"teardown_method",
"teardown_module",
]
assert trace_setups_teardowns == expected
| mpl-2.0 |
mahendra-r/edx-platform | lms/djangoapps/certificates/migrations/0023_auto__del_unique_badgeassertion_course_id_user__add_unique_badgeassert.py | 52 | 11804 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'BadgeAssertion', fields ['course_id', 'user']
db.delete_unique('certificates_badgeassertion', ['course_id', 'user_id'])
# Adding unique constraint on 'BadgeAssertion', fields ['course_id', 'user', 'mode']
db.create_unique('certificates_badgeassertion', ['course_id', 'user_id', 'mode'])
def backwards(self, orm):
# Removing unique constraint on 'BadgeAssertion', fields ['course_id', 'user', 'mode']
db.delete_unique('certificates_badgeassertion', ['course_id', 'user_id', 'mode'])
# Adding unique constraint on 'BadgeAssertion', fields ['course_id', 'user']
db.create_unique('certificates_badgeassertion', ['course_id', 'user_id'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'certificates.badgeassertion': {
'Meta': {'unique_together': "(('course_id', 'user', 'mode'),)", 'object_name': 'BadgeAssertion'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'default': 'None', 'max_length': '255', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'certificates.badgeimageconfiguration': {
'Meta': {'object_name': 'BadgeImageConfiguration'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '125'})
},
'certificates.certificategenerationconfiguration': {
'Meta': {'ordering': "('-change_date',)", 'object_name': 'CertificateGenerationConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'certificates.certificategenerationcoursesetting': {
'Meta': {'object_name': 'CertificateGenerationCourseSetting'},
'course_key': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'})
},
'certificates.certificatehtmlviewconfiguration': {
'Meta': {'ordering': "('-change_date',)", 'object_name': 'CertificateHtmlViewConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'configuration': ('django.db.models.fields.TextField', [], {}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'certificates.certificatewhitelist': {
'Meta': {'object_name': 'CertificateWhitelist'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'default': 'None', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'whitelist': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'certificates.examplecertificate': {
'Meta': {'object_name': 'ExampleCertificate'},
'access_key': ('django.db.models.fields.CharField', [], {'default': "'25c5af67da3d47039aa8b00b3a929fa9'", 'max_length': '255', 'db_index': 'True'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'download_url': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True'}),
'error_reason': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True'}),
'example_cert_set': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['certificates.ExampleCertificateSet']"}),
'full_name': ('django.db.models.fields.CharField', [], {'default': "u'John Do\\xeb'", 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'started'", 'max_length': '255'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'88190407a2f14c429a7b5336e3fb0189'", 'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'certificates.examplecertificateset': {
'Meta': {'object_name': 'ExampleCertificateSet'},
'course_key': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'})
},
'certificates.generatedcertificate': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'GeneratedCertificate'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'default': 'None', 'max_length': '255', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
'distinction': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'download_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'download_uuid': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'error_reason': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'grade': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '32'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'unavailable'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'verify_uuid': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['certificates'] | agpl-3.0 |
bpollack/gerrit | tools/gitlog2asciidoc.py | 22 | 3306 | #!/usr/bin/python
from optparse import OptionParser
import re
import subprocess
import sys
"""
This script generates a release note from the output of git log
between the specified tags.
Options:
--issues Only output the commits with issues associated with them.
--issue-numbers Only output the issue numbers of the commits with issues
associated with them
Arguments:
since -- tag name
until -- tag name
Example Input:
* <commit subject>
+
<commit message>
Bug: issue 123
Change-Id: <change id>
Signed-off-by: <name>
Expected Output:
* issue 123 <commit subject>
+
<commit message>
"""
parser = OptionParser(usage='usage: %prog [options] <since> <until>')
parser.add_option('-i', '--issues', action='store_true',
dest='issues_only', default=False,
help='only output the commits with issues association')
parser.add_option('-n', '--issue-numbers', action='store_true',
dest='issue_numbers_only', default=False,
help='only outputs issue numbers of the commits with \
issues association')
(options, args) = parser.parse_args()
if len(args) != 2:
parser.error("wrong number of arguments")
issues_only = options.issues_only
issue_numbers_only = options.issue_numbers_only
since_until = args[0] + '..' + args[1]
proc = subprocess.Popen(['git', 'log', '--reverse', '--no-merges',
since_until, "--format=* %s%n+%n%b"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,)
stdout_value = proc.communicate()[0]
subject = ""
message = []
is_issue = False
# regex pattern to match following cases such as Bug: 123, Issue Bug: 123,
# Bug: GERRIT-123, Bug: issue 123, Bug issue: 123, issue: 123, issue: bug 123
p = re.compile('bug: GERRIT-|bug(:? issue)?:? |issue(:? bug)?:? ',
re.IGNORECASE)
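# Illustrative matches for the pattern above (hypothetical commit lines):
#   p.sub('issue ', 'Bug: 123 fix parser')  -> 'issue 123 fix parser'
#   p.sub('', 'issue: bug 123')             -> '123'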
if issue_numbers_only:
for line in stdout_value.splitlines(True):
if p.match(line):
sys.stdout.write(p.sub('', line))
else:
for line in stdout_value.splitlines(True):
# Move issue number to subject line
if p.match(line):
line = p.sub('issue ', line).replace('\n',' ')
subject = subject[:2] + line + subject[2:]
is_issue = True
elif line.startswith('* '):
# Write change log for a commit
if subject != "":
if (not issues_only or is_issue):
# Write subject
sys.stdout.write(subject)
# Write message lines
if message != []:
# Clear + from last line in commit message
message[-1] = '\n'
for m in message:
sys.stdout.write(m)
# Start new commit block
message = []
subject = line
is_issue = False
# Remove commit footers
elif re.match(r'((\w+-)+\w+:)', line):
continue
# Don't add extra blank line if last one is already blank
elif line == '\n' and message and message[-1] != '+\n':
message.append('+\n')
elif line != '\n':
message.append(line)
| apache-2.0 |
kamilion/starcheat | starcheat/gui/quests.py | 2 | 4935 | """
Qt quests management dialog
"""
from gui.itemedit import ItemEditOptions
from gui.common import text_to_html
import qt_quests
from PyQt5.QtWidgets import QDialog
from PyQt5.QtWidgets import QMessageBox
def make_quest_info(name, data):
"""Return an HTML summary of a quest from its data."""
info = ""
info += "<b>%s</b><br>" % name
info += "%s<br><br>" % text_to_html(data["title"])
info += "%s<br><br>" % text_to_html(data["fullText"])
info += "<b>Pixels:</b> %s" % data["money"]
return info
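# Illustrative call (field values assumed, and assuming text_to_html passes
# plain text through unchanged; real quest data comes from the player's
# saved metadata):
#   make_quest_info("quest1", {"title": "A Title", "fullText": "Quest body",
#                              "money": 100})
# -> "<b>quest1</b><br>A Title<br><br>Quest body<br><br><b>Pixels:</b> 100"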
class Quests():
def __init__(self, main_window):
self.dialog = QDialog(main_window.window)
self.ui = qt_quests.Ui_Dialog()
self.ui.setupUi(self.dialog)
self.main_window = main_window
self.assets = main_window.assets
self.player = main_window.player
self.quests = self.read_quests()
self.ui.quest_status.currentTextChanged.connect(self.filter_quests)
self.ui.quest_list.currentItemChanged.connect(self.lookup_quest)
self.ui.trash_button.clicked.connect(self.trash_quest)
self.ui.edit_button.clicked.connect(self.edit_quest)
self.filter_quests()
self.update_statuses()
def update_statuses(self):
"""Refresh quest status combo box."""
self.ui.quest_status.clear()
for status in sorted(self.quests.keys()):
self.ui.quest_status.addItem(status.capitalize())
def get_status(self):
status = self.ui.quest_status.currentText()
return status.lower()
def edit_quest(self):
"""Launch JSON edit dialog for selected quest."""
if self.ui.quest_list.currentItem() is None:
return
selected = self.selected_quest()
edit = ItemEditOptions(self.dialog,
selected[0],
selected[1],
"Edit Quest Data")
def save():
name, value = edit.get_option()
status = self.get_status()
self.quests[status][name] = value
self.lookup_quest()
edit.dialog.accepted.connect(save)
edit.ui.name.setEnabled(False)
edit.dialog.exec()
def trash_quest(self):
"""Confirm with user and delete selected quest."""
if self.ui.quest_list.currentItem() is None:
return
status = self.get_status()
quest_name = self.ui.quest_list.currentItem().text()
ask_dialog = QMessageBox(self.dialog)
ask_dialog.setWindowTitle("Trash Quest")
ask_dialog.setText("Are you sure?")
ask_dialog.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
ask_dialog.setDefaultButton(QMessageBox.No)
ask_dialog.setIcon(QMessageBox.Question)
if ask_dialog.exec() == QMessageBox.Yes:
self.quests[status].pop(quest_name)
self.update_statuses()
self.filter_quests()
self.lookup_quest()
def selected_quest(self):
"""Return selected quest name and data."""
status = self.get_status()
quest_name = self.ui.quest_list.currentItem().text()
return quest_name, self.quests[status][quest_name]
def lookup_quest(self):
"""Update info box and button statuses using selected quest."""
if self.ui.quest_list.currentItem() is None:
self.ui.trash_button.setEnabled(False)
self.ui.edit_button.setEnabled(False)
self.ui.quest_info_label.setText("<b>Nothing selected.</b>")
return
selected = self.selected_quest()
self.ui.trash_button.setEnabled(True)
self.ui.edit_button.setEnabled(True)
self.ui.quest_info_label.setText(make_quest_info(*selected))
def filter_quests(self):
"""Filter quest list based on selected status."""
status = self.get_status()
self.ui.quest_list.clear()
# no quests
if status not in self.quests:
return
for quest_id in self.quests[status].keys():
self.ui.quest_list.addItem(quest_id)
self.ui.quest_list.setFocus()
self.ui.quest_list.setCurrentRow(0)
def read_quests(self):
"""Read quests from metadata and return dict sorted by status."""
raw_quests = self.player.metadata.get_quests()
quests = {}
for k, v in raw_quests.items():
status = v["status"]
if status not in quests:
quests[status] = {}
quests[status][k] = v
return quests
def write_quests(self):
"""Convert and write sorted quest dict to metadata."""
quests = {}
for status_k, status_v in self.quests.items():
for quest_k, quest_v in status_v.items():
quests[quest_k] = quest_v
self.player.metadata.set_quests(quests)
self.main_window.window.setWindowModified(True)
| mit |
detiber/lib_openshift | test/test_v1_cinder_volume_source.py | 2 | 1328 | # coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import lib_openshift
from lib_openshift.rest import ApiException
from lib_openshift.models.v1_cinder_volume_source import V1CinderVolumeSource
class TestV1CinderVolumeSource(unittest.TestCase):
""" V1CinderVolumeSource unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1CinderVolumeSource(self):
"""
Test V1CinderVolumeSource
"""
model = lib_openshift.models.v1_cinder_volume_source.V1CinderVolumeSource()
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
dagwieers/ansible | test/units/modules/network/cnos/test_cnos_command.py | 45 | 4330 | # Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from units.compat.mock import patch
from ansible.modules.network.cnos import cnos_command
from units.modules.utils import set_module_args
from .cnos_module import TestCnosModule, load_fixture
class TestCnosCommandModule(TestCnosModule):
module = cnos_command
def setUp(self):
super(TestCnosCommandModule, self).setUp()
self.mock_run_commands = patch('ansible.modules.network.cnos.cnos_command.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestCnosCommandModule, self).tearDown()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None):
def load_from_file(*args, **kwargs):
module, commands = args
output = list()
for item in commands:
try:
command = item
except ValueError:
command = 'show version'
filename = str(command).replace(' ', '_')
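# fixture files are named after the command with spaces replaced by
# underscores, e.g. 'show version' is loaded from the fixture 'show_version'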
output.append(load_fixture(filename))
return output
self.run_commands.side_effect = load_from_file
def test_cnos_command_simple(self):
set_module_args(dict(commands=['show version']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 1)
self.assertTrue(result['stdout'][0].startswith('Lenovo Networking Operating System (NOS) Software'))
def test_cnos_command_multiple(self):
set_module_args(dict(commands=['show version', 'show running-config']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 2)
self.assertTrue(result['stdout'][0].startswith('Lenovo Networking Operating System (NOS) Software'))
def test_cnos_command_wait_for(self):
wait_for = 'result[0] contains "Lenovo Networking Operating System (NOS) Software"'
set_module_args(dict(commands=['show version'], wait_for=wait_for))
self.execute_module()
def test_cnos_command_wait_for_fails(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show version'], wait_for=wait_for))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 10)
def test_cnos_command_retries(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 2)
def test_cnos_command_match_any(self):
wait_for = ['result[0] contains "Lenovo Networking Operating System (NOS) Software"',
'result[0] contains "test string"']
set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
self.execute_module()
def test_cnos_command_match_all(self):
wait_for = ['result[0] contains "Lenovo Networking Operating System (NOS) Software"',
'result[0] contains "Lenovo"']
set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
self.execute_module()
def test_cnos_command_match_all_failure(self):
wait_for = ['result[0] contains "Lenovo ENOS"',
'result[0] contains "test string"']
commands = ['show version', 'show run']
set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
self.execute_module(failed=True)
| gpl-3.0 |
bendetat/survey-thing | node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/generator/ninja_test.py | 1843 | 1786 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the ninja.py file. """
import gyp.generator.ninja as ninja
import unittest
import StringIO
import sys
import TestCommon
class TestPrefixesAndSuffixes(unittest.TestCase):
def test_BinaryNamesWindows(self):
# These cannot run on non-Windows as they require a VS installation to
# correctly handle variable expansion.
if sys.platform.startswith('win'):
writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.',
'build.ninja', 'win')
spec = { 'target_name': 'wee' }
self.assertTrue(writer.ComputeOutputFileName(spec, 'executable').
endswith('.exe'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
endswith('.dll'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
endswith('.lib'))
def test_BinaryNamesLinux(self):
writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.',
'build.ninja', 'linux')
spec = { 'target_name': 'wee' }
self.assertTrue('.' not in writer.ComputeOutputFileName(spec,
'executable'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
startswith('lib'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
startswith('lib'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
endswith('.so'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
endswith('.a'))
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
dsavransky/EXOSIMS | EXOSIMS/SurveyEnsemble/IPClusterEnsemble.py | 1 | 7185 | from __future__ import print_function
from ipyparallel import Client
from EXOSIMS.Prototypes.SurveyEnsemble import SurveyEnsemble
from EXOSIMS.util.get_module import get_module
import time
from IPython.core.display import clear_output
import sys
import json
import os
import numpy as np
import EXOSIMS
import EXOSIMS.MissionSim
import os
import os.path
try:
import cPickle as pickle
except ImportError:
import pickle
import random
import traceback
import subprocess
class IPClusterEnsemble(SurveyEnsemble):
"""Parallelized suvey ensemble based on IPython parallel (ipcluster)
"""
def __init__(self, **specs):
SurveyEnsemble.__init__(self, **specs)
self.verb = specs.get('verbose', True)
# access the cluster
self.rc = Client()
self.dview = self.rc[:]
self.dview.block = True
with self.dview.sync_imports(): import EXOSIMS, EXOSIMS.util.get_module, \
os, os.path, time, random, pickle, traceback, numpy
if 'logger' in specs:
specs.pop('logger')
if 'seed' in specs:
specs.pop('seed')
self.dview.push(dict(specs=specs))
self.vprint("Building SurveySimulation object on all workers.")
res = self.dview.execute("SS = EXOSIMS.util.get_module.get_module(specs['modules'] \
['SurveySimulation'], 'SurveySimulation')(**specs)")
res2 = self.dview.execute("SS.reset_sim()")
self.vprint("Created SurveySimulation objects on %d engines."%len(self.rc.ids))
#for row in res.stdout:
# self.vprint(row)
self.lview = self.rc.load_balanced_view()
self.maxNumEngines = len(self.rc.ids)
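# Illustrative use (a sketch; the run_one callable and names are assumed):
#   start workers first with `ipcluster start -n 4`, then:
#   ens = IPClusterEnsemble(**specs)
#   res = ens.run_ensemble(sim, nb_run_sim=10, run_one=my_run_one)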
def run_ensemble(self, sim, nb_run_sim, run_one=None, genNewPlanets=True,
rewindPlanets=True, kwargs={}):
"""
Args:
sim:
"""
hangingRunsOccured = False # keeps track of whether hanging runs have occurred
t1 = time.time()
async_res = []
for j in range(nb_run_sim):
ar = self.lview.apply_async(run_one, genNewPlanets=genNewPlanets,
rewindPlanets=rewindPlanets, **kwargs)
async_res.append(ar)
print("Submitted %d tasks."%len(async_res))
engine_pids = self.rc[:].apply(os.getpid).get_dict()
#ar2 = self.lview.apply_async(os.getpid)
#pids = ar2.get_dict()
print('engine_pids')
print(engine_pids)
runStartTime = time.time() # record the job start time
avg_time_per_run = 0.
tmplenoutstandingset = nb_run_sim
tLastRunFinished = time.time()
ar= self.rc._asyncresult_from_jobs(async_res)
while not ar.ready():
ar.wait(10.)
clear_output(wait=True)
if ar.progress > 0:
timeleft = ar.elapsed/ar.progress * (nb_run_sim - ar.progress)
if timeleft > 3600.:
timeleftstr = "%2.2f hours"%(timeleft/3600.)
elif timeleft > 60.:
timeleftstr = "%2.2f minutes"%(timeleft/60.)
else:
timeleftstr = "%2.2f seconds"%timeleft
else:
timeleftstr = "who knows"
#Terminate hanging runs
outstandingset = self.rc.outstanding # a set of msg_ids that have been submitted but results have not been received
if len(outstandingset) > 0 and len(outstandingset) < nb_run_sim:#there is at least 1 run still going and we have not just started
avg_time_per_run = (time.time() - runStartTime)/float(nb_run_sim - len(outstandingset))#compute average amount of time per run
if len(outstandingset) < tmplenoutstandingset:#The scheduler has finished a run
tmplenoutstandingset = len(outstandingset)#update this. should decrease by ~1 or number of cores...
tLastRunFinished = time.time()#update tLastRunFinished to the last time a simulation finished (right now)
#self.vprint("tmplenoutstandingset %d, tLastRunFinished %0.6f"%(tmplenoutstandingset,tLastRunFinished))
if time.time() - tLastRunFinished > avg_time_per_run*(1. + self.maxNumEngines*2.)*4.:
#nb_run_sim = len(self.rc.outstanding)
#restartRuns = True
self.vprint('Aborting ' + str(len(self.rc.outstanding)) + 'qty outstandingset jobs')
#runningPIDS = os.listdir('/proc') # get all running pids
self.vprint('queue_status')
self.vprint(str(self.rc.queue_status()))
self.rc.abort()
ar.wait(20)
runningPIDS = [int(tpid) for tpid in os.listdir('/proc') if tpid.isdigit()]
#[self.rc.queue_status()[eind] for eind in np.arange(self.maxNumEngines) if self.rc.queue_status()[eind]['tasks']>0]
for engineInd in [eind for eind in np.arange(self.maxNumEngines) if self.rc.queue_status()[eind]['tasks']>0]:
os.kill(engine_pids[engineInd],15)
time.sleep(20)
# for pid in [engine_pids[eind] for eind in np.arange(len(engine_pids))]:
# if pid in runningPIDS:
# os.kill(pid,9) # send kill command to stop this worker
stopIPClusterCommand = subprocess.Popen(['ipcluster','stop'])
stopIPClusterCommand.wait()
time.sleep(60) # doing this instead of waiting for ipcluster to terminate
stopIPClusterCommand = subprocess.Popen(['ipcluster','stop'])
stopIPClusterCommand.wait()
time.sleep(60) # doing this instead of waiting for ipcluster to terminate
hangingRunsOccured = True # keeps track of whether hanging runs have occurred
break
#stopIPClusterCommand.wait() # waits for process to terminate
#call(["ipcluster","stop"]) # send command to stop ipcluster
#self.rc.abort(jobs=self.rc.outstanding.copy().pop())
#self.rc.abort()#by default should abort all outstanding jobs... #it is possible that this will not stop the jobs running
#ar.wait(100)
#self.rc.purge_everything() # purge all results if outstanding *because rc.abort() didn't seem to do the job right
tLastRunFinished = time.time()#update tLastRunFinished to the last time a simulation was restarted (right now)
print("%4i/%i tasks finished after %4i s. About %s to go." % (ar.progress, nb_run_sim, ar.elapsed, timeleftstr), end="")
sys.stdout.flush()
#numRunStarts += 1 # increment number of run restarts
t2 = time.time()
print("\nCompleted in %d sec" % (t2 - t1))
if hangingRunsOccured: # hanging runs have occurred
res = [1]
else:
res = [ar.get() for ar in async_res]
return res
| bsd-3-clause |
mattwthompson/mdtraj | mdtraj/testing/docstrings.py | 12 | 7568 | ##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
import sys
import types
import warnings
import pkgutil
import importlib
from inspect import (isclass, ismodule, isfunction, ismethod,
getmembers, getdoc, getmodule, getargs, isbuiltin)
from mdtraj.testing.docscrape import NumpyDocString
from mdtraj.utils.six import get_function_code, get_function_closure, PY2
__all__ = ['docstring_verifiers', 'import_all_modules']
##############################################################################
# functions
##############################################################################
def docstring_verifiers(module, error_on_none=False):
"""Yield an iterable of tests that verify the docstrings (for
adherence to the NumpyDoc format) of all functions/classes defined
in a module.
Parameters
----------
module : module
The module to test
error_on_none : bool, default=False
Throw an error if no docstring is defined
"""
# These are the types that we want to check
# currently, the docstring on classes are not being checked, since
# isclass() is not in the list
acceptors = [isfunction, ismethod, isbuiltin]
accept = lambda f: any([acc(f) for acc in acceptors])
functions = [f for f in walk(module) if accept(f)]
def format(f):
"""
Format a method/function/class as a string
Parameters
----------
f : function, method, class
Returns
-------
repr : string
A string representation
"""
if ismethod(f):
if PY2:
return '.'.join([getmodule(f).__name__, f.im_class.__name__, f.__name__])
else:
return '.'.join([getmodule(f).__name__, f.__self__.__class__.__name__, f.__name__])
if isfunction(f) or isbuiltin(f):
return '.'.join([getmodule(f).__name__, f.__name__])
if isclass(f):
return f.__name__
return 'Error'
def check_docstring(f):
"""
Ensure the docstring of `f` is in accordance with the numpy standard
Currently, only the Parameters section of the docstring is checked.
Parameters
----------
f : function, method, class
Returns
-------
repr : string
A string representation
"""
doc = getdoc(f)
if doc is None:
if error_on_none:
raise ValueError('no docstring for %s' % format(f))
else:
with warnings.catch_warnings():
warnings.simplefilter('error')
parsed = NumpyDocString(doc)
param_names = {e[0] for e in parsed['Parameters']}
if isbuiltin(f):
# You can't get the arglist from a builtin, which
# is how cython functions turn up
# but you can, hackily, get the number of arguments it wants
# by parsing the error when you supply too many
import re
try:
f(*list(range(100)))
except TypeError as e:
m = re.search('takes at most (\d+) positional arguments', str(e))
if not m:
return
n_args = int(m.group(1))
if len(param_names) != n_args:
raise ValueError("In %s, number of arguments, %d, doesn't "
" match the length of the Parameters in the "
"docstring, %d" % (format(f), n_args, len(param_names)))
return
args = set(getargs(get_function_code(f)).args)
if 'self' in args:
args.remove('self')
if 'cls' in args:
args.remove('cls')
if args != param_names:
raise ValueError("In %s, arguments %s don't "
"match Parameters list %s" % (format(f),
list(args), list(param_names)))
for f in functions:
qq = lambda: check_docstring(f)
qq.description = 'NumpyDoc: %s.%s' % (module.__name__, f.__name__)
qq.fname = f.__name__
yield qq
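# Illustrative consumption (assumed; generator-style test runners call each
# yielded verifier, which raises ValueError on a malformed docstring):
#   import mdtraj.geometry.distance as mod
#   for test in docstring_verifiers(mod):
#       test()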
def ispackage(obj):
"""
Check if obj is a package. Simply look for whether its a module whose
filename is __init__.py(c)
Parameters
----------
obj : module
"""
if ismodule(obj):
return obj.__file__.endswith("__init__.pyc") or \
obj.__file__.endswith("__init__.py")
return False
def walk(module):
"""
Get all of the functions, classes and their methods defined within
a python module
Parameters
----------
module : module
Returns
-------
funcs : list
List of functions, classes and methods
"""
assert ismodule(module)
if ispackage(module):
raise ValueError('Sorry, you need to supply me a module, not a package')
def is_valid(obj):
if getmodule(obj) == module:
# cython specific stuff
if module.__file__.endswith('.so'):
if isbuiltin(obj):
return not obj.__name__.startswith('_')
if ismethod(obj) or isfunction(obj):
return not obj.__name__.startswith('_')
if isclass(obj):
return True
return False
instack = [v for k, v in getmembers(module) if is_valid(v)]
outstack = []
while True:
try:
item = instack.pop()
except IndexError:
break
outstack.append(item)
if isclass(item):
instack.extend([v for k, v in getmembers(item) if is_valid(v)])
return outstack
def import_all_modules(pkg):
result = []
for _, modname, ispkg in pkgutil.iter_modules(pkg.__path__):
c = '%s.%s' % (pkg.__name__, modname)
if modname.startswith('test_'):
continue
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=DeprecationWarning)
mod = importlib.import_module(c)
if ispkg:
result.extend(import_all_modules(mod))
else:
result.append(mod)
except ImportError as e:
print('e', e)
continue
return result
| lgpl-2.1 |
mthornhill/dj-stripe | tests/test_contrib/test_serializers.py | 12 | 2953 | """
.. module:: dj-stripe.tests.test_contrib.test_serializers
:synopsis: dj-stripe Serializer Tests.
.. moduleauthor:: Philippe Luickx (@philippeluickx)
"""
from __future__ import unicode_literals
from decimal import Decimal
from django.test import TestCase
from django.utils import timezone
from django.conf import settings
from mock import patch, PropertyMock
from djstripe.contrib.rest_framework.serializers import SubscriptionSerializer, CreateSubscriptionSerializer
from djstripe.models import CurrentSubscription
class SubscriptionSerializerTest(TestCase):
def test_valid_serializer(self):
now = timezone.now()
serializer = SubscriptionSerializer(
data={
'plan': settings.DJSTRIPE_PLANS['test0']['plan'],
'quantity': 2,
'start': now,
'status': CurrentSubscription.STATUS_ACTIVE,
'amount': settings.DJSTRIPE_PLANS['test0']['price'],
}
)
self.assertTrue(serializer.is_valid())
self.assertEqual(serializer.validated_data, {
'plan': 'test0',
'quantity': 2,
'start': now,
'status': 'active',
'amount': Decimal('1000'),
})
self.assertEqual(serializer.errors, {})
def test_invalid_serializer(self):
now = timezone.now()
serializer = SubscriptionSerializer(
data={
'plan': settings.DJSTRIPE_PLANS['test0']['plan'],
'start': now,
'status': CurrentSubscription.STATUS_ACTIVE,
'amount': settings.DJSTRIPE_PLANS['test0']['price'],
}
)
self.assertFalse(serializer.is_valid())
self.assertEqual(serializer.validated_data, {})
self.assertEqual(serializer.errors, {'quantity': ['This field is required.']})
class CreateSubscriptionSerializerTest(TestCase):
@patch("stripe.Token.create", return_value=PropertyMock(id="token_test"))
def test_valid_serializer(self, stripe_token_mock):
token = stripe_token_mock(card={})
serializer = CreateSubscriptionSerializer(
data={
'plan': settings.DJSTRIPE_PLANS['test0']['plan'],
'stripe_token': token.id,
}
)
self.assertTrue(serializer.is_valid())
self.assertEqual(serializer.validated_data['plan'], 'test0')
self.assertIn('stripe_token', serializer.validated_data)
self.assertEqual(serializer.errors, {})
def test_invalid_serializer(self):
serializer = CreateSubscriptionSerializer(
data={
'plan': settings.DJSTRIPE_PLANS['test0']['plan'],
}
)
self.assertFalse(serializer.is_valid())
self.assertEqual(serializer.validated_data, {})
self.assertEqual(serializer.errors, {
'stripe_token': ['This field is required.'],
})
| bsd-3-clause |
mountain213/neo4jworkshop | REST_clients/Python_client/requests/packages/urllib3/contrib/ntlmpool.py | 1010 | 4507 | """
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
"""
Implements an NTLM authentication version of an urllib3 connection pool
"""
scheme = 'https'
def __init__(self, user, pw, authurl, *args, **kwargs):
"""
authurl is a random URL on the server that is protected by NTLM.
user is the Windows user, probably in the DOMAIN\\username format.
pw is the password for the user.
"""
super(NTLMConnectionPool, self).__init__(*args, **kwargs)
self.authurl = authurl
self.rawuser = user
user_parts = user.split('\\', 1)
self.domain = user_parts[0].upper()
self.user = user_parts[1]
self.pw = pw
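# Illustrative construction (values assumed); positional args after authurl
# are forwarded to HTTPSConnectionPool, so host/port go there:
#   pool = NTLMConnectionPool('DOMAIN\\user', 'secret', '/protected',
#                             'server.example.com', 443)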
def _new_conn(self):
# Performs the NTLM handshake that secures the connection. The socket
# must be kept open while requests are performed.
self.num_connections += 1
log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' %
(self.num_connections, self.host, self.authurl))
headers = {}
headers['Connection'] = 'Keep-Alive'
req_header = 'Authorization'
resp_header = 'www-authenticate'
conn = HTTPSConnection(host=self.host, port=self.port)
# Send negotiation message
headers[req_header] = (
'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
reshdr = dict(res.getheaders())
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % reshdr)
log.debug('Response data: %s [...]' % res.read(100))
# Remove the reference to the socket, so that it can not be closed by
# the response object (we want to keep the socket open)
res.fp = None
# Server should respond with a challenge message
auth_header_values = reshdr[resp_header].split(', ')
auth_header_value = None
for s in auth_header_values:
if s[:5] == 'NTLM ':
auth_header_value = s[5:]
if auth_header_value is None:
raise Exception('Unexpected %s response header: %s' %
(resp_header, reshdr[resp_header]))
# Send authentication message
ServerChallenge, NegotiateFlags = \
ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
self.user,
self.domain,
self.pw,
NegotiateFlags)
headers[req_header] = 'NTLM %s' % auth_msg
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % dict(res.getheaders()))
log.debug('Response data: %s [...]' % res.read()[:100])
if res.status != 200:
if res.status == 401:
raise Exception('Server rejected request: wrong '
'username or password')
raise Exception('Wrong server response: %s %s' %
(res.status, res.reason))
res.fp = None
log.debug('Connection established')
return conn
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True):
if headers is None:
headers = {}
headers['Connection'] = 'Keep-Alive'
return super(NTLMConnectionPool, self).urlopen(method, url, body,
headers, retries,
redirect,
assert_same_host)
| mit |
mensler/ansible | lib/ansible/modules/network/nxos/nxos_vtp_password.py | 46 | 8006 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_vtp_password
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages VTP password configuration.
description:
- Manages VTP password configuration.
author:
- Gabriele Gerbino (@GGabriele)
notes:
- VTP feature must be active on the device to use this module.
- This module is used to manage only VTP passwords.
- Use this in combination with M(nxos_vtp_domain) and M(nxos_vtp_version)
to fully manage VTP operations.
- You can set/remove password only if a VTP domain already exists.
- If C(state=absent) and no C(vtp_password) is provided, it removes the current
VTP password.
- If C(state=absent) and C(vtp_password) is provided, the proposed C(vtp_password)
has to match the existing one in order to remove it.
options:
vtp_password:
description:
- VTP password
required: false
default: null
state:
description:
- Manage the state of the resource
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# ENSURE VTP PASSWORD IS SET
- nxos_vtp_password:
password: ntc
state: present
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# ENSURE VTP PASSWORD IS REMOVED
- nxos_vtp_password:
password: ntc
state: absent
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"vtp_password": "new_ntc"}
existing:
description:
- k/v pairs of existing vtp
returned: always
type: dict
sample: {"domain": "ntc", "version": "1", "vtp_password": "ntc"}
end_state:
description: k/v pairs of vtp after module execution
returned: always
type: dict
sample: {"domain": "ntc", "version": "1", "vtp_password": "new_ntc"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["vtp password new_ntc"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
import re
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
if 'show run' not in command:
command += ' | json'
cmds = [command]
body = run_commands(module, cmds)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = run_commands(module, cmds)
return body
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = str(value)
else:
new_dict[new_key] = value
return new_dict
def get_vtp_config(module):
command = 'show vtp status'
body = execute_show_command(
command, module, command_type='cli_show_ascii')[0]
vtp_parsed = {}
if body:
version_regex = '.*VTP version running\s+:\s+(?P<version>\d).*'
domain_regex = '.*VTP Domain Name\s+:\s+(?P<domain>\S+).*'
try:
match_version = re.match(version_regex, body, re.DOTALL)
version = match_version.groupdict()['version']
except AttributeError:
version = ''
try:
match_domain = re.match(domain_regex, body, re.DOTALL)
domain = match_domain.groupdict()['domain']
except AttributeError:
domain = ''
if domain and version:
vtp_parsed['domain'] = domain
vtp_parsed['version'] = version
vtp_parsed['vtp_password'] = get_vtp_password(module)
return vtp_parsed
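# Typical parsed result (values illustrative, mirroring the RETURN docs):
#   {'domain': 'ntc', 'version': '1', 'vtp_password': 'ntc'}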
def get_vtp_password(module):
command = 'show vtp password'
body = execute_show_command(command, module)[0]
password = body['passwd']
if password:
return str(password)
else:
return ""
def main():
argument_spec = dict(
vtp_password=dict(type='str', no_log=True),
state=dict(choices=['absent', 'present'],
default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
vtp_password = module.params['vtp_password'] or None
state = module.params['state']
existing = get_vtp_config(module)
end_state = existing
args = dict(vtp_password=vtp_password)
changed = False
proposed = dict((k, v) for k, v in args.items() if v is not None)
delta = dict(set(proposed.items()).difference(existing.items()))
commands = []
if state == 'absent':
if vtp_password is not None:
if existing['vtp_password'] == proposed['vtp_password']:
commands.append(['no vtp password'])
else:
module.fail_json(msg="Proposed vtp password doesn't match "
"current vtp password. It cannot be "
"removed when state=absent. If you are "
"trying to change the vtp password, use "
"state=present.")
else:
if not existing.get('domain'):
module.fail_json(msg='Cannot remove a vtp password '
'before vtp domain is set.')
elif existing['vtp_password'] != ('\\'):
commands.append(['no vtp password'])
elif state == 'present':
if delta:
if not existing.get('domain'):
module.fail_json(msg='Cannot set vtp password '
'before vtp domain is set.')
else:
commands.append(['vtp password {0}'.format(vtp_password)])
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
changed = True
load_config(module, cmds)
end_state = get_vtp_config(module)
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
carnotweat/cpupimp | libs/suds/resolver.py | 205 | 15814 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{resolver} module provides a collection of classes that
provide wsdl/xsd named type resolution.
"""
import re
from logging import getLogger
from suds import *
from suds.sax import splitPrefix, Namespace
from suds.sudsobject import Object
from suds.xsd.query import BlindQuery, TypeQuery, qualify
log = getLogger(__name__)
class Resolver:
"""
An I{abstract} schema-type resolver.
@ivar schema: A schema object.
@type schema: L{xsd.schema.Schema}
"""
def __init__(self, schema):
"""
@param schema: A schema object.
@type schema: L{xsd.schema.Schema}
"""
self.schema = schema
def find(self, name, resolved=True):
"""
Get the definition object for the schema object by name.
@param name: The name of a schema object.
@type name: basestring
@param resolved: A flag indicating that the fully resolved type
should be returned.
@type resolved: boolean
@return: The found schema I{type}
@rtype: L{xsd.sxbase.SchemaObject}
"""
log.debug('searching schema for (%s)', name)
qref = qualify(name, self.schema.root, self.schema.tns)
query = BlindQuery(qref)
result = query.execute(self.schema)
if result is None:
log.error('(%s) not-found', name)
return None
log.debug('found (%s) as (%s)', name, Repr(result))
if resolved:
result = result.resolve()
return result
class PathResolver(Resolver):
"""
Resolves the definition object for the schema type located at the specified path.
The path may contain (.) dot notation to specify nested types.
@ivar wsdl: A wsdl object.
@type wsdl: L{wsdl.Definitions}
"""
def __init__(self, wsdl, ps='.'):
"""
@param wsdl: A schema object.
@type wsdl: L{wsdl.Definitions}
@param ps: The path separator character
@type ps: char
"""
Resolver.__init__(self, wsdl.schema)
self.wsdl = wsdl
self.altp = re.compile('({)(.+)(})(.+)')
self.splitp = re.compile('({.+})*[^\%s]+' % ps[0])
def find(self, path, resolved=True):
"""
Get the definition object for the schema type located at the specified path.
The path may contain (.) dot notation to specify nested types.
Actually, the path separator is usually a (.) but can be redefined
during construction.
@param path: A (.) separated path to a schema type.
@type path: basestring
@param resolved: A flag indicating that the fully resolved type
should be returned.
@type resolved: boolean
@return: The found schema I{type}
@rtype: L{xsd.sxbase.SchemaObject}
"""
result = None
parts = self.split(path)
try:
result = self.root(parts)
if len(parts) > 1:
result = result.resolve(nobuiltin=True)
result = self.branch(result, parts)
result = self.leaf(result, parts)
if resolved:
result = result.resolve(nobuiltin=True)
except PathResolver.BadPath:
log.error('path: "%s", not-found' % path)
return result
def root(self, parts):
"""
Find the path root.
@param parts: A list of path parts.
@type parts: [str,..]
@return: The root.
@rtype: L{xsd.sxbase.SchemaObject}
"""
result = None
name = parts[0]
log.debug('searching schema for (%s)', name)
qref = self.qualify(parts[0])
query = BlindQuery(qref)
result = query.execute(self.schema)
if result is None:
log.error('(%s) not-found', name)
raise PathResolver.BadPath(name)
else:
log.debug('found (%s) as (%s)', name, Repr(result))
return result
def branch(self, root, parts):
"""
Traverse the path until the leaf is reached.
@param parts: A list of path parts.
@type parts: [str,..]
@param root: The root.
@type root: L{xsd.sxbase.SchemaObject}
@return: The end of the branch.
@rtype: L{xsd.sxbase.SchemaObject}
"""
result = root
for part in parts[1:-1]:
name = splitPrefix(part)[1]
log.debug('searching parent (%s) for (%s)', Repr(result), name)
result, ancestry = result.get_child(name)
if result is None:
log.error('(%s) not-found', name)
raise PathResolver.BadPath(name)
else:
result = result.resolve(nobuiltin=True)
log.debug('found (%s) as (%s)', name, Repr(result))
return result
def leaf(self, parent, parts):
"""
Find the leaf.
@param parts: A list of path parts.
@type parts: [str,..]
@param parent: The leaf's parent.
@type parent: L{xsd.sxbase.SchemaObject}
@return: The leaf.
@rtype: L{xsd.sxbase.SchemaObject}
"""
name = splitPrefix(parts[-1])[1]
if name.startswith('@'):
result, path = parent.get_attribute(name[1:])
else:
result, ancestry = parent.get_child(name)
if result is None:
raise PathResolver.BadPath(name)
return result
def qualify(self, name):
"""
Qualify the name as either:
- plain name
- ns prefixed name (eg: ns0:Person)
- fully ns qualified name (eg: {http://myns-uri}Person)
@param name: The name of an object in the schema.
@type name: str
@return: A qualified name.
@rtype: qname
"""
m = self.altp.match(name)
if m is None:
return qualify(name, self.wsdl.root, self.wsdl.tns)
else:
return (m.group(4), m.group(2))
def split(self, s):
"""
Split the string on (.) while preserving any (.) inside the
'{}' alternate syntax for full ns qualification.
@param s: A plain or qualified name.
@type s: str
@return: A list of the name's parts.
@rtype: [str,..]
"""
parts = []
b = 0
while 1:
m = self.splitp.match(s, b)
if m is None:
break
b,e = m.span()
parts.append(s[b:e])
b = e+1
return parts
class BadPath(Exception): pass
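# Illustrative use of PathResolver (a sketch; 'client' is an assumed suds
# Client instance):
#   resolver = PathResolver(client.wsdl)
#   city = resolver.find('Person.address.city')
#   name = resolver.find('{http://myns-uri}Person.name')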
class TreeResolver(Resolver):
"""
The tree resolver is a I{stateful} tree resolver
used to resolve each node in a tree. As such, it mirrors
the tree structure to ensure that nodes are resolved in
context.
@ivar stack: The context stack.
@type stack: list
"""
def __init__(self, schema):
"""
@param schema: A schema object.
@type schema: L{xsd.schema.Schema}
"""
Resolver.__init__(self, schema)
self.stack = Stack()
def reset(self):
"""
Reset the resolver's state.
"""
self.stack = Stack()
def push(self, x):
"""
Push an I{object} onto the stack.
@param x: An object to push.
@type x: L{Frame}
@return: The pushed frame.
@rtype: L{Frame}
"""
if isinstance(x, Frame):
frame = x
else:
frame = Frame(x)
self.stack.append(frame)
log.debug('push: (%s)\n%s', Repr(frame), Repr(self.stack))
return frame
def top(self):
"""
Get the I{frame} at the top of the stack.
@return: The top I{frame}, else None.
@rtype: L{Frame}
"""
if len(self.stack):
return self.stack[-1]
else:
return Frame.Empty()
def pop(self):
"""
Pop the frame at the top of the stack.
@return: The popped frame, else None.
@rtype: L{Frame}
"""
if len(self.stack):
popped = self.stack.pop()
log.debug('pop: (%s)\n%s', Repr(popped), Repr(self.stack))
return popped
else:
log.debug('stack empty, not-popped')
return None
def depth(self):
"""
Get the current stack depth.
@return: The current stack depth.
@rtype: int
"""
return len(self.stack)
def getchild(self, name, parent):
""" get a child by name """
log.debug('searching parent (%s) for (%s)', Repr(parent), name)
if name.startswith('@'):
return parent.get_attribute(name[1:])
else:
return parent.get_child(name)
class NodeResolver(TreeResolver):
"""
The node resolver is a I{stateful} XML document resolver
used to resolve each node in a tree. As such, it mirrors
the tree structure to ensure that nodes are resolved in
context.
"""
def __init__(self, schema):
"""
@param schema: A schema object.
@type schema: L{xsd.schema.Schema}
"""
TreeResolver.__init__(self, schema)
def find(self, node, resolved=False, push=True):
"""
@param node: An xml node to be resolved.
@type node: L{sax.element.Element}
@param resolved: A flag indicating that the fully resolved type should be
returned.
@type resolved: boolean
@param push: Indicates that the resolved type should be
pushed onto the stack.
@type push: boolean
@return: The found schema I{type}
@rtype: L{xsd.sxbase.SchemaObject}
"""
name = node.name
parent = self.top().resolved
if parent is None:
result, ancestry = self.query(name, node)
else:
result, ancestry = self.getchild(name, parent)
known = self.known(node)
if result is None:
return result
if push:
frame = Frame(result, resolved=known, ancestry=ancestry)
pushed = self.push(frame)
if resolved:
result = result.resolve()
return result
def findattr(self, name, resolved=True):
"""
Find an attribute type definition.
@param name: An attribute name.
@type name: basestring
@param resolved: A flag indicating that the fully resolved type should be
returned.
@type resolved: boolean
@return: The found schema I{type}
@rtype: L{xsd.sxbase.SchemaObject}
"""
name = '@%s'%name
parent = self.top().resolved
if parent is None:
result, ancestry = self.query(name, node)
else:
result, ancestry = self.getchild(name, parent)
if result is None:
return result
if resolved:
result = result.resolve()
return result
def query(self, name, node):
""" blindly query the schema by name """
log.debug('searching schema for (%s)', name)
qref = qualify(name, node, node.namespace())
query = BlindQuery(qref)
result = query.execute(self.schema)
return (result, [])
def known(self, node):
""" resolve type referenced by @xsi:type """
ref = node.get('type', Namespace.xsins)
if ref is None:
return None
qref = qualify(ref, node, node.namespace())
query = BlindQuery(qref)
return query.execute(self.schema)
class GraphResolver(TreeResolver):
"""
The graph resolver is a I{stateful} L{Object} graph resolver
used to resolve each node in a tree. As such, it mirrors
the tree structure to ensure that nodes are resolved in
context.
"""
def __init__(self, schema):
"""
@param schema: A schema object.
@type schema: L{xsd.schema.Schema}
"""
TreeResolver.__init__(self, schema)
def find(self, name, object, resolved=False, push=True):
"""
@param name: The name of the object to be resolved.
@type name: basestring
@param object: The name's value.
@type object: (any|L{Object})
@param resolved: A flag indicating that the fully resolved type
should be returned.
@type resolved: boolean
@param push: Indicates that the resolved type should be
pushed onto the stack.
@type push: boolean
@return: The found schema I{type}
@rtype: L{xsd.sxbase.SchemaObject}
"""
known = None
parent = self.top().resolved
if parent is None:
result, ancestry = self.query(name)
else:
result, ancestry = self.getchild(name, parent)
if result is None:
return None
if isinstance(object, Object):
known = self.known(object)
if push:
frame = Frame(result, resolved=known, ancestry=ancestry)
pushed = self.push(frame)
if resolved:
if known is None:
result = result.resolve()
else:
result = known
return result
def query(self, name):
""" blindly query the schema by name """
log.debug('searching schema for (%s)', name)
schema = self.schema
wsdl = self.wsdl()
if wsdl is None:
qref = qualify(name, schema.root, schema.tns)
else:
qref = qualify(name, wsdl.root, wsdl.tns)
query = BlindQuery(qref)
result = query.execute(schema)
return (result, [])
def wsdl(self):
""" get the wsdl """
container = self.schema.container
if container is None:
return None
else:
return container.wsdl
def known(self, object):
""" get the type specified in the object's metadata """
try:
md = object.__metadata__
known = md.sxtype
return known
except:
pass
class Frame:
def __init__(self, type, resolved=None, ancestry=()):
self.type = type
if resolved is None:
resolved = type.resolve()
self.resolved = resolved.resolve()
self.ancestry = ancestry
def __str__(self):
return '%s\n%s\n%s' % \
(Repr(self.type),
Repr(self.resolved),
[Repr(t) for t in self.ancestry])
class Empty:
def __getattr__(self, name):
if name == 'ancestry':
return ()
else:
return None
class Stack(list):
def __repr__(self):
result = []
for item in self:
result.append(repr(item))
return '\n'.join(result) | gpl-3.0 |
noelbk/openipmi | swig/python/openipmigui/gui_term.py | 3 | 9780 | # gui_term.py
#
# openipmi GUI terminal handling for SoL
#
# Author: MontaVista Software, Inc.
# Corey Minyard <minyard@mvista.com>
# source@mvista.com
#
# Copyright 2006 MontaVista Software Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free
# Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
import Tix
import _term
def gpos(x, y):
return str(y+1) + "." + str(x)
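# Example: Tk text indices are "line.column" with 1-based lines and 0-based
# columns, so gpos(0, 0) == "1.0" (top-left cell) and gpos(5, 2) == "3.5".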
class Terminal(_term.TerminalEmulator):
def __init__(self, parent):
self.fsize = 8
_term.TerminalEmulator.__init__(self)
self.fonts = ("-family terminal -size " + str(self.fsize)
+ " -weight normal -underline 0",
# Bold
"-family terminal -size " + str(self.fsize)
+ " -weight bold -underline 0",
# Underline
"-family terminal -size " + str(self.fsize)
+ " -weight normal -underline 1",
# Bold Underline
"-family terminal -size " + str(self.fsize)
+ " -weight bold -underline 1")
self.colors = ("black",
"red",
"green",
"yellow",
"blue",
"magenta",
"cyan",
"white")
self.text = Tix.Text(parent, width=self.width, height=self.height,
state="normal", wrap="none", font=self.fonts[0])
self.text.pack(side=Tix.TOP, fill=Tix.BOTH, expand=1)
# We create a tag for every possible terminal configuration, based
# upon an equation using the colors and font type.
for f in range(0, len(self.fonts)):
for fg in range(0, len(self.colors)):
for bg in range(0, len(self.colors)):
idx = (f * 8 * 8) + (fg * 8) + bg
self.text.tag_configure(str(idx), font=self.fonts[f],
foreground=self.colors[fg],
background=self.colors[bg])
pass
pass
pass
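# Worked example of the tag equation: bold text (font index 1) in white
# (fg 7) on black (bg 0) uses tag str((1 * 64) + (7 * 8) + 0) == "120";
# DrawText() below recomputes the same index to select a tag.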
self.default_tag = str(7 * 8) # white foreground, black background
for i in range(1, self.height+1):
self.text.insert(str(i) + ".1", "%*s\n" % (self.width, ""),
self.default_tag)
pass
self.handle_cursor()
self.text.bind("<Key>", self.HandleChar)
self.text.bind("<Control-Key>", self.HandleControlChar)
self.text.focus_set()
return
def DrawText(self, fg_color, bg_color, flags, x, y, val):
if (flags & _term.INVERSE):
tmp = bg_color
bg_color = fg_color
fg_color = tmp
pass
# FIXME - we don't handle blinking or concealed
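# Mask down to the two low attribute bits (presumably BOLD and UNDERLINE
# in _term), which index the four fonts built in __init__.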
flags &= 3
tag = str((flags * 8 * 8) + (fg_color * 8) + bg_color)
self.text.delete(gpos(x, y), gpos(x+len(val), y))
self.text.insert(gpos(x, y), val, tag)
return
def DrawCursor(self, fg_color, bg_color, flags, x, y, val):
# Draw cursor as inverse, just flip the background and foreground
self.DrawText(bg_color, fg_color, flags, x, y, val)
return
def ScrollLines(self, y1, y2):
self.text.delete(gpos(0, y1), gpos(0, y1+1))
if (y2 == self.height-1):
self.text.insert("end", "\n%*s" % (self.width, ""),
self.default_tag)
pass
else:
# Not (y2+1), because we deleted a line
pos = gpos(0, y2)
self.text.insert(pos, "%*s\n" % (self.width, ""),
self.default_tag)
pass
return
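# Example: ScrollLines(0, self.height-1) scrolls the whole screen up one
# row: line 0 is deleted and a fresh blank line is appended at the bottom.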
def ScrollLinesUp(self, y1, y2):
self.text.delete(gpos(0, y2), gpos(0, y2+1))
if (y1 == self.height-1):
self.text.insert("end", "\n%*s" % (self.width, ""),
self.default_tag)
pass
else:
pos = gpos(0, y1)
self.text.insert(pos, "%*s\n" % (self.width, ""),
self.default_tag)
pass
return
def DeleteChars(self, x, y, length):
pos = gpos(x, y)
endpos = gpos(x+length, y)
self.text.delete(pos, endpos)
self.text.insert(str(y+1) + ".end", "%*s" % (length, ""),
self.default_tag)
return
def InsertChars(self, x, y, length):
self.text.delete(gpos(self.width-length, y), gpos(self.width, y))
self.text.insert(gpos(x, y), "%*s" % (length, ""),
self.default_tag)
return
def SendBack(self, data):
self.HandleTerminalOutput(data)
return
def Bell(self):
return
def RequestSizeChange(self, w, h):
return
def HandleChar(self, event):
key = event.keysym
if (len(key) == 1):
s = key
elif (key == "Return") or (key == "KP_Enter"):
s = "\x0d"
elif (key == "Backspace"):
s = "\x08"
elif (key == "Up"):
s = "\x1b[A"
elif (key == "Down"):
s = "\x1b[B"
elif (key == "Right"):
s = "\x1b[C"
elif (key == "Left"):
s = "\x1b[D"
elif (key == "Next"):
s = "\x1b[6~"
elif (key == "Prior"):
s = "\x1b[5~"
elif (key == "Insert"):
s = "\x1b[2~"
elif (key == "Home"):
s = "\x1b[OH"
elif (key == "End"):
s = "\x1b[OF"
elif (key == "Delete"):
s = "\x1b[3~"
elif (key == "F1"):
s = "\x1bOP"
elif (key == "F2"):
s = "\x1bOQ"
elif (key == "F3"):
s = "\x1bOR"
elif (key == "F4"):
s = "\x1bOS"
elif (key == "F5"):
s = "\x1b[15~"
elif (key == "F6"):
s = "\x1b[17~"
elif (key == "F7"):
s = "\x1b[18~"
elif (key == "F8"):
s = "\x1b[19~"
elif (key == "F9"):
s = "\x1b[20~"
elif (key == "F10"):
s = "\x1b[21~"
elif (key == "F11"):
s = "\x1b[23~"
elif (key == "F12"):
s = "\x1b[24~"
elif (len(event.char) == 1) and (event.char < chr(255)):
s = event.char
# Keypad stuff comes after the check because if numlock is off, the
# event.char will be empty
elif (key == "KP_Add"):
s = "\x1bOl"
elif (key == "KP_Subtract"):
s = "\x1bOm"
elif (key == "KP_Delete"):
s = "\x1bOn"
elif (key == "KP_Multiply"):
s = "\x1bOQ"
elif (key == "KP_Divide"):
s = "\x1bOR"
elif (key == "KP_Insert"):
s = "\x1bOp"
elif (key == "KP_End"):
s = "\x1bOq"
elif (key == "KP_Down"):
s = "\x1bOr"
elif (key == "KP_Next"):
s = "\x1bOs"
elif (key == "KP_Left"):
s = "\x1bOt"
elif (key == "KP_Begin"):
s = "\x1bOu"
elif (key == "KP_Right"):
s = "\x1bOv"
elif (key == "KP_Home"):
s = "\x1bOw"
elif (key == "KP_Up"):
s = "\x1bOx"
elif (key == "KP_Prior"):
s = "\x1bOy"
else:
return "break"
self.HandleTerminalOutput(s)
return "break"
def HandleControlChar(self, event):
key = event.keysym
if (len(key) != 1):
return
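# Control characters are derived arithmetically: subtracting 64 maps
# '@'..'Z' onto 0x00..0x1a and subtracting 96 maps 'a'..'z' onto
# 0x01..0x1a, so Ctrl-C and Ctrl-c both send chr(3) (ETX).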
if ((key >= '@') and (key <= 'Z')): # '@' 'A' .. 'Z'
s = "%c" % (ord(key)-64)
pass
elif ((key >= 'a') and (key <= 'z')): # 'a' .. 'z'
s = "%c" % (ord(key)-96)
elif (key < ' '):
s = key
pass
else:
return "break"
self.HandleTerminalOutput(s)
return "break"
pass
import sys
class TestTerm(Terminal):
def __init__(self, parent):
Terminal.__init__(self, parent)
return
def HandleTerminalOutput(self, s):
# FIXME: hacks to remove
sys.stdout.write(s)
sys.stdout.flush()
if (s[0] == '\x01'):
self.ProcessInput("abcdefghijk")
return
self.ProcessInput(s)
return
pass
class MyApp(Tix.Tk):
def __init__(self):
Tix.Tk.__init__(self)
term = TestTerm(self)
return
pass
if __name__ == "__main__":
app = MyApp()
app.mainloop()
| gpl-2.0 |
runjmc/maraschino | lib/sqlalchemy/dialects/mssql/__init__.py | 23 | 1112 | # mssql/__init__.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from sqlalchemy.dialects.mssql import base, pyodbc, adodbapi, \
pymssql, zxjdbc, mxodbc
base.dialect = pyodbc.dialect
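# Usage sketch (hedged): with pyodbc installed as the default DBAPI above,
# an engine is typically created from a URL such as the following; the
# credentials and DSN name are illustrative only:
#
#   from sqlalchemy import create_engine
#   engine = create_engine("mssql+pyodbc://user:password@some_dsn")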
from sqlalchemy.dialects.mssql.base import \
INTEGER, BIGINT, SMALLINT, TINYINT, VARCHAR, NVARCHAR, CHAR, \
NCHAR, TEXT, NTEXT, DECIMAL, NUMERIC, FLOAT, DATETIME,\
DATETIME2, DATETIMEOFFSET, DATE, TIME, SMALLDATETIME, \
BINARY, VARBINARY, BIT, REAL, IMAGE, TIMESTAMP,\
MONEY, SMALLMONEY, UNIQUEIDENTIFIER, SQL_VARIANT, dialect
__all__ = (
'INTEGER', 'BIGINT', 'SMALLINT', 'TINYINT', 'VARCHAR', 'NVARCHAR', 'CHAR',
'NCHAR', 'TEXT', 'NTEXT', 'DECIMAL', 'NUMERIC', 'FLOAT', 'DATETIME',
'DATETIME2', 'DATETIMEOFFSET', 'DATE', 'TIME', 'SMALLDATETIME',
'BINARY', 'VARBINARY', 'BIT', 'REAL', 'IMAGE', 'TIMESTAMP',
'MONEY', 'SMALLMONEY', 'UNIQUEIDENTIFIER', 'SQL_VARIANT', 'dialect'
) | mit |