repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
GuillaumeArruda/INF8601 | inf8601-lab1-2.1.0/preprocess.py | 1 | 1847 | #!/usr/bin/python
#
# Utilitaire pour pre-traiter les resultats
#
import os
# variables
out_dir = "results"
base = "time_dragonizer"
suffix = ".data"
def get_or_create(data, key):
    """Return data[key], inserting an empty dict first if the key is missing.

    Replaces the deprecated ``dict.has_key`` (removed in Python 3) with
    ``dict.setdefault``, which does the lookup and the insert in one step.
    """
    return data.setdefault(key, {})
def get_or_create_path(data, path_elem):
    """Walk (and lazily create) a chain of nested dicts.

    For ``path_elem == [a, b, c]`` this returns ``data[a][b][c]``, creating
    each intermediate dict on first access.  An empty path returns ``data``
    itself — the loop already handles that, so the original explicit
    length check was redundant and has been dropped.
    """
    node = data
    for elem in path_elem:
        # setdefault inserts {} only when the key is absent (single lookup).
        node = node.setdefault(elem, {})
    return node
class Stat(object):
    """Running accumulator for a stream of numeric samples.

    Keeps the running total (``sum``) and sample count (``cnt``) so the
    mean can be computed on demand via :meth:`avg`.
    """

    def __init__(self):
        # Start from a float so avg() always performs true division.
        self.sum = 0.0
        self.cnt = 0

    def add_sample(self, item):
        """Fold a single sample into the running total."""
        self.sum = self.sum + item
        self.cnt = self.cnt + 1

    def avg(self):
        """Mean of the samples seen so far (raises if no samples yet)."""
        return self.sum / self.cnt

    def __str__(self):
        return str(self.avg())

    # repr intentionally mirrors str: both show the current average.
    __repr__ = __str__
def populate_stats(data, items):
    """Ensure ``data`` holds a fresh Stat() accumulator under each key in
    ``items``; existing accumulators are left untouched.

    Uses ``in`` instead of the deprecated ``dict.has_key`` (removed in
    Python 3).
    """
    for item in items:
        if item not in data:
            data[item] = Stat()
def save_sheet(s, path):
    """Write sheet text ``s`` to ``out_dir/<base>_<p1>_..._<pN><suffix>``.

    The name is derived from the module-level ``base``/``suffix``/``out_dir``
    constants plus each path element.  The ``with`` statement guarantees the
    file is closed even if the write raises (the original leaked the handle
    on error), and the name is built with one join instead of repeated
    concatenation.
    """
    name = "_".join([base] + ["%s" % (p,) for p in path]) + suffix
    with open(os.path.join(out_dir, name), "w") as f:
        f.write(s)
def process(filename):
    """Aggregate timing samples from ``filename`` and emit one data sheet
    per (cmd, lib, power) combination.

    Input lines are CSV: ``cmd,lib,pwr,thd,sys,user,elapsed``.  The first
    four fields index a nested dict; the last three are accumulated into
    Stat objects keyed by measurement name.  Fixes: the input file is now
    closed via ``with`` (it was never closed), lines are streamed instead
    of slurped with readlines(), and the Python-2-only
    ``kset = d.keys(); kset.sort()`` idiom is replaced by ``sorted()``.
    """
    measurements = ["sys", "user", "elapsed"]
    data = {}
    offset = 4  # number of leading key columns before the measurements
    with open(filename, "r") as f:
        for line in f:
            fields = line.split(",")
            # cmd,lib,pwr,thd,sys,user,elapsed
            entry = get_or_create_path(data, fields[0:offset])
            populate_stats(entry, measurements)
            for i, t in enumerate(measurements):
                entry[t].add_sample(float(fields[i + offset]))
    # Output files: one sheet per (cmd, lib, power), rows sorted by thread.
    for k1, d1 in data.items():
        for k2, d2 in d1.items():
            for k3, d3 in d2.items():
                rows = ["cmd=%s lib=%s power=%s" % (k1, k2, k3),
                        "threads,sys,user,elapsed"]
                for thd in sorted(d3.keys()):
                    d4 = d3[thd]
                    rows.append("%s,%.3f,%.3f,%.3f" % (
                        thd, d4["sys"].avg(), d4["user"].avg(),
                        d4["elapsed"].avg()))
                save_sheet("\n".join(rows) + "\n", [k1, k2, k3])
if __name__ == "__main__":
    # Default input file: <out_dir>/time_dragonizer.data.
    # (The original final line was fused with dataset table-row junk,
    # making it syntactically invalid; restored here.)
    filename = os.path.join(out_dir, base + suffix)
    process(filename)
Nikea/VisTrails | vistrails/packages/spreadsheet/identifiers.py | 2 | 2038 | ###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
# Identity metadata for the VisTrails spreadsheet package.
identifier = 'org.vistrails.vistrails.spreadsheet'
name = 'VisTrails Spreadsheet'
version = '0.9.3'
# Previous identifier — presumably kept so files created before the
# rename still resolve to this package; confirm against the registry.
old_identifiers = ['edu.utah.sci.vistrails.spreadsheet']
| bsd-3-clause |
ximion/dak-dep11 | dak/dakdb/__init__.py | 10 | 1046 | """
Database update scripts for usage with B{dak update-db}
@contact: Debian FTP Master <ftpmaster@debian.org>
@copyright: 2008 Michael Casadevall <mcasadevall@debian.org>
@license: GNU General Public License version 2 or later
Update scripts have to C{import psycopg2} and
C{from daklib.dak_exceptions import DBUpdateError}.
There has to be B{at least} the function C{do_update(self)} to be
defined. It should take all necessary steps to update the
database. If the update fails the changes have to be rolled back and the
C{DBUpdateError} exception raised to properly halt the execution of any
other update.
Example::
def do_update(self):
print "Doing something"
try:
c = self.db.cursor()
c.execute("SOME SQL STATEMENT")
self.db.commit()
except psycopg2.ProgrammingError, msg:
self.db.rollback()
raise DBUpdateError, "Unable to do whatever, rollback issued. Error message : %s" % (str(msg))
This function can do whatever it wants and use everything from dak and
daklib.
"""
| gpl-2.0 |
eHealthAfrica/kivy | doc/sources/sphinxext/kivy_pygments_theme.py | 76 | 4901 | # kivy pygments style based on flask/tango style
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class KivyStyle(Style):
    """Pygments style for the Kivy docs, based on the Flask/Tango theme.

    Pure style data: maps Pygments token types to "bold/italic #rrggbb"
    style strings.  The trailing comments name the short CSS class that
    Pygments emits for each token type.
    """

    # The background color is set in kivystyle.sty
    background_color = ""
    default_style = ""

    styles = {
        # No corresponding class for the following:
        #Text:                     "",                # class: ''
        Whitespace:                "underline #ffffff",      # class: 'w'
        Error:                     "#FF0000 border:#FF0000", # class: 'err'
        Other:                     "#FF0000",                # class 'x'

        Comment:                   "italic #666385", # class: 'c'
        Comment.Preproc:           "noitalic",       # class: 'cp'

        Keyword:                   "bold #000000",   # class: 'k'
        Keyword.Constant:          "bold #000000",   # class: 'kc'
        Keyword.Declaration:       "bold #000000",   # class: 'kd'
        Keyword.Namespace:         "bold #000000",   # class: 'kn'
        Keyword.Pseudo:            "bold #000000",   # class: 'kp'
        Keyword.Reserved:          "bold #000000",   # class: 'kr'
        Keyword.Type:              "bold #000000",   # class: 'kt'

        Operator:                  "#582800",        # class: 'o'
        Operator.Word:             "bold #000000",   # class: 'ow' - like keywords

        Punctuation:               "bold #000000",   # class: 'p'

        # because special names such as Name.Class, Name.Function, etc.
        # are not recognized as such later in the parsing, we choose them
        # to look the same as ordinary variables.
        Name:                      "#000000",        # class: 'n'
        Name.Attribute:            "#c4a000",        # class: 'na' - to be revised
        Name.Builtin:              "#000000",        # class: 'nb'
        Name.Builtin.Pseudo:       "#aa1105",        # class: 'bp'
        Name.Class:                "#db6500",        # class: 'nc' - to be revised
        Name.Constant:             "#000000",        # class: 'no' - to be revised
        Name.Decorator:            "#888",           # class: 'nd' - to be revised
        Name.Entity:               "#ce5c00",        # class: 'ni'
        Name.Exception:            "bold #cc0000",   # class: 'ne'
        Name.Function:             "#db6500",        # class: 'nf'
        Name.Property:             "#000000",        # class: 'py'
        Name.Label:                "#f57900",        # class: 'nl'
        Name.Namespace:            "#000000",        # class: 'nn' - to be revised
        Name.Other:                "#000000",        # class: 'nx'
        Name.Tag:                  "bold #004461",   # class: 'nt' - like a keyword
        Name.Variable:             "#000000",        # class: 'nv' - to be revised
        Name.Variable.Class:       "#000000",        # class: 'vc' - to be revised
        Name.Variable.Global:      "#000000",        # class: 'vg' - to be revised
        Name.Variable.Instance:    "#000000",        # class: 'vi' - to be revised

        Number:                    "#990000",        # class: 'm'

        Literal:                   "#000000",        # class: 'l'
        Literal.Date:              "#000000",        # class: 'ld'

        String:                    "#74171b",        # class: 's'
        String.Backtick:           "#4e9a06",        # class: 'sb'
        String.Char:               "#4e9a06",        # class: 'sc'
        String.Doc:                "italic #640000", # class: 'sd' - like a comment
        String.Double:             "#74171b",        # class: 's2'
        String.Escape:             "#74171b",        # class: 'se'
        String.Heredoc:            "#74171b",        # class: 'sh'
        String.Interpol:           "#74171b",        # class: 'si'
        String.Other:              "#74171b",        # class: 'sx'
        String.Regex:              "#74171b",        # class: 'sr'
        String.Single:             "#74171b",        # class: 's1'
        String.Symbol:             "#74171b",        # class: 'ss'

        Generic:                   "#000000",        # class: 'g'
        Generic.Deleted:           "#a40000",        # class: 'gd'
        Generic.Emph:              "italic #000000", # class: 'ge'
        Generic.Error:             "#ef2929",        # class: 'gr'
        Generic.Heading:           "bold #000080",   # class: 'gh'
        Generic.Inserted:          "#00A000",        # class: 'gi'
        Generic.Output:            "#888",           # class: 'go'
        Generic.Prompt:            "#745334",        # class: 'gp'
        Generic.Strong:            "bold #000000",   # class: 'gs'
        Generic.Subheading:        "bold #800080",   # class: 'gu'
        Generic.Traceback:         "bold #a40000",   # class: 'gt'
    }
| mit |
pasiegel/SickGear | lib/httplib2/iri2uri.py | 885 | 3850 | """
iri2uri
Converts an IRI to a URI.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = []
__version__ = "1.0.0"
__license__ = "MIT"
__history__ = """
"""
import urlparse
# Convert an IRI to a URI following the rules in RFC 3987
#
# The characters we need to encode and escape are defined in the spec:
#
# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD
# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF
# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD
# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD
# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD
# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD
# / %xD0000-DFFFD / %xE1000-EFFFD
# Code-point ranges (the 'ucschar' and 'iprivate' productions of RFC 3987)
# whose characters must be percent-encoded when mapping an IRI to a URI.
# The ranges are listed in ascending order, which encode() relies on.
escape_range = [
    (0xA0, 0xD7FF),
    (0xE000, 0xF8FF),
    (0xF900, 0xFDCF),
    (0xFDF0, 0xFFEF),
    (0x10000, 0x1FFFD),
    (0x20000, 0x2FFFD),
    (0x30000, 0x3FFFD),
    (0x40000, 0x4FFFD),
    (0x50000, 0x5FFFD),
    (0x60000, 0x6FFFD),
    (0x70000, 0x7FFFD),
    (0x80000, 0x8FFFD),
    (0x90000, 0x9FFFD),
    (0xA0000, 0xAFFFD),
    (0xB0000, 0xBFFFD),
    (0xC0000, 0xCFFFD),
    (0xD0000, 0xDFFFD),
    (0xE1000, 0xEFFFD),
    (0xF0000, 0xFFFFD),
    (0x100000, 0x10FFFD)
]

def encode(c):
    """Percent-encode the single character ``c`` if its code point lies in
    one of the RFC 3987 escape ranges; otherwise return it unchanged.

    Because ``escape_range`` is ascending, the scan stops as soon as a
    range starting above the code point is reached.
    """
    codepoint = ord(c)
    for low, high in escape_range:
        if codepoint < low:
            # Ranges are ascending: nothing further can match.
            break
        if low <= codepoint <= high:
            # UTF-8-encode the char, then %XX-escape each octet
            # (Python 2: iterating a str yields 1-char strings).
            return "".join(["%%%2X" % ord(octet) for octet in c.encode('utf-8')])
    return c
def iri2uri(uri):
    """Convert an IRI to a URI.

    Note that IRIs must be passed in as unicode strings; do not utf-8
    encode the IRI before passing it into the function.  Non-unicode
    input is returned unchanged.
    """
    # Python 2 only: relies on the `unicode` builtin and the module-level
    # `import urlparse`.
    if isinstance(uri, unicode):
        (scheme, authority, path, query, fragment) = urlparse.urlsplit(uri)
        # Host part is IDNA-encoded (punycode) rather than %-escaped.
        authority = authority.encode('idna')
        # For each character in 'ucschar' or 'iprivate'
        # 1. encode as utf-8
        # 2. then %-encode each octet of that utf-8
        uri = urlparse.urlunsplit((scheme, authority, path, query, fragment))
        uri = "".join([encode(c) for c in uri])
    return uri
if __name__ == "__main__":
    # Self-test: run the module's own unittest suite.
    import unittest

    class Test(unittest.TestCase):
        def test_uris(self):
            """Test that URIs are invariant under the transformation."""
            invariant = [
                u"ftp://ftp.is.co.za/rfc/rfc1808.txt",
                u"http://www.ietf.org/rfc/rfc2396.txt",
                u"ldap://[2001:db8::7]/c=GB?objectClass?one",
                u"mailto:John.Doe@example.com",
                u"news:comp.infosystems.www.servers.unix",
                u"tel:+1-816-555-1212",
                u"telnet://192.0.2.16:80/",
                u"urn:oasis:names:specification:docbook:dtd:xml:4.1.2"]
            for uri in invariant:
                self.assertEqual(uri, iri2uri(uri))

        def test_iri(self):
            """ Test that the right type of escaping is done for each part of the URI."""
            self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri(u"http://\N{COMET}.com/\N{COMET}"))
            self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri(u"http://bitworking.org/?fred=\N{COMET}"))
            self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri(u"http://bitworking.org/#\N{COMET}"))
            self.assertEqual("#%E2%98%84", iri2uri(u"#\N{COMET}"))
            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))
            # Idempotence: converting twice gives the same result once.
            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")))
            # A pre-encoded byte string must NOT be treated as an IRI.
            self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8')))

    unittest.main()
| gpl-3.0 |
mattdm/dnf | tests/cli/commands/test_repolist.py | 13 | 1450 | # Copyright (C) 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
import dnf.cli.commands.repolist as repolist
import dnf.repo
import tests.support
class TestRepolist(tests.support.TestCase):
    """Unit tests for helpers in ``dnf.cli.commands.repolist``."""

    # Patch the gettext alias `_` with a null translation so the expected
    # string below is locale-independent.
    @tests.support.mock.patch('dnf.cli.commands.repolist._',
                              dnf.pycomp.NullTranslations().ugettext)
    def test_expire_str(self):
        # A freshly constructed Repo has no recorded fetch time, hence
        # "last: unknown" in the rendered expiry string.
        repo = dnf.repo.Repo('rollup', None)
        expire = repolist._expire_str(repo, None)
        self.assertEqual(expire, '172800 second(s) (last: unknown)')
| gpl-2.0 |
liqi328/rjrepaircompany | django/views/debug.py | 152 | 33304 | import datetime
import os
import re
import sys
import types
from django.conf import settings
from django.http import HttpResponse, HttpResponseServerError, HttpResponseNotFound
from django.template import (Template, Context, TemplateDoesNotExist,
TemplateSyntaxError)
from django.template.defaultfilters import force_escape, pprint
from django.utils.html import escape
from django.utils.importlib import import_module
from django.utils.encoding import smart_unicode, smart_str
HIDDEN_SETTINGS = re.compile('SECRET|PASSWORD|PROFANITIES_LIST|SIGNATURE')
def linebreak_iter(template_source):
    """Yield the starting offset of every line in ``template_source``.

    Yields 0 first, then the index just past each newline, and finally
    ``len(template_source) + 1`` as a sentinel end offset.
    """
    yield 0
    for offset, char in enumerate(template_source):
        if char == '\n':
            yield offset + 1
    yield len(template_source) + 1
def cleanse_setting(key, value):
    """Cleanse an individual setting key/value of sensitive content.

    Keys matching HIDDEN_SETTINGS have their value replaced by a run of
    asterisks; dict values are cleansed recursively, key by key.
    """
    try:
        if HIDDEN_SETTINGS.search(key):
            return '********************'
        if isinstance(value, dict):
            # Recurse so nested settings dicts are cleansed too.
            return dict((k, cleanse_setting(k, v)) for k, v in value.items())
        return value
    except TypeError:
        # If the key isn't regex-able, just return as-is.
        return value
def get_safe_settings():
"Returns a dictionary of the settings module, with sensitive settings blurred out."
settings_dict = {}
for k in dir(settings):
if k.isupper():
settings_dict[k] = cleanse_setting(k, getattr(settings, k))
return settings_dict
def technical_500_response(request, exc_type, exc_value, tb):
    """
    Create a technical server error response. The last three arguments are
    the values returned from sys.exc_info() and friends.
    """
    # Delegate all traceback formatting to ExceptionReporter; this function
    # only wraps the HTML in a 500 response.
    reporter = ExceptionReporter(request, exc_type, exc_value, tb)
    html = reporter.get_traceback_html()
    return HttpResponseServerError(html, mimetype='text/html')
class ExceptionReporter(object):
    """
    A class to organize and coordinate reporting on exceptions.
    """
    def __init__(self, request, exc_type, exc_value, tb, is_email=False):
        self.request = request
        self.exc_type = exc_type
        self.exc_value = exc_value
        self.tb = tb
        self.is_email = is_email

        # Filled in later by get_traceback_html() when the exception turns
        # out to be template-related.
        self.template_info = None
        self.template_does_not_exist = False
        self.loader_debug_info = None

        # Handle deprecated string exceptions
        if isinstance(self.exc_type, basestring):
            self.exc_value = Exception('Deprecated String Exception: %r' % self.exc_type)
            self.exc_type = type(self.exc_value)

    def get_traceback_html(self):
        "Return HTML code for traceback."
        # For TemplateDoesNotExist, record which loaders were consulted and
        # which candidate files they tried, for the "postmortem" section.
        if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
            from django.template.loader import template_source_loaders
            self.template_does_not_exist = True
            self.loader_debug_info = []
            for loader in template_source_loaders:
                try:
                    module = import_module(loader.__module__)
                    if hasattr(loader, '__class__'):
                        source_list_func = loader.get_template_sources
                    else: # NOTE: Remember to remove this branch when we deprecate old template loaders in 1.4
                        source_list_func = module.get_template_sources
                    # NOTE: This assumes exc_value is the name of the template that
                    # the loader attempted to load.
                    template_list = [{'name': t, 'exists': os.path.exists(t)}
                                     for t in source_list_func(str(self.exc_value))]
                except (ImportError, AttributeError):
                    template_list = []
                if hasattr(loader, '__class__'):
                    loader_name = loader.__module__ + '.' + loader.__class__.__name__
                else: # NOTE: Remember to remove this branch when we deprecate old template loaders in 1.4
                    loader_name = loader.__module__ + '.' + loader.__name__
                self.loader_debug_info.append({
                    'loader': loader_name,
                    'templates': template_list,
                })
        # Template syntax errors carry a `source` attribute when
        # TEMPLATE_DEBUG is on; extract line/context info for display.
        if (settings.TEMPLATE_DEBUG and hasattr(self.exc_value, 'source') and
            isinstance(self.exc_value, TemplateSyntaxError)):
            self.get_template_exception_info()
        frames = self.get_traceback_frames()
        for i, frame in enumerate(frames):
            if 'vars' in frame:
                # Pretty-print and HTML-escape each local variable value.
                frame['vars'] = [(k, force_escape(pprint(v))) for k, v in frame['vars']]
            frames[i] = frame
        unicode_hint = ''
        if self.exc_type and issubclass(self.exc_type, UnicodeError):
            # Show a few characters around the offending range, decoded
            # leniently so the hint itself cannot raise.
            start = getattr(self.exc_value, 'start', None)
            end = getattr(self.exc_value, 'end', None)
            if start is not None and end is not None:
                unicode_str = self.exc_value.args[1]
                unicode_hint = smart_unicode(unicode_str[max(start-5, 0):min(end+5, len(unicode_str))], 'ascii', errors='replace')
        from django import get_version
        t = Template(TECHNICAL_500_TEMPLATE, name='Technical 500 template')
        c = Context({
            'is_email': self.is_email,
            'unicode_hint': unicode_hint,
            'frames': frames,
            'request': self.request,
            'settings': get_safe_settings(),
            'sys_executable': sys.executable,
            'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
            'server_time': datetime.datetime.now(),
            'django_version_info': get_version(),
            'sys_path' : sys.path,
            'template_info': self.template_info,
            'template_does_not_exist': self.template_does_not_exist,
            'loader_debug_info': self.loader_debug_info,
        })
        # Check whether exception info is available
        if self.exc_type:
            c['exception_type'] = self.exc_type.__name__
        if self.exc_value:
            c['exception_value'] = smart_unicode(self.exc_value, errors='replace')
        if frames:
            c['lastframe'] = frames[-1]
        return t.render(c)

    def get_template_exception_info(self):
        """Populate self.template_info with the template source surrounding
        the syntax error, split into before/during/after the error span."""
        # exc_value.source is (origin, (start, end)) — character offsets
        # of the offending token within the template source.
        origin, (start, end) = self.exc_value.source
        template_source = origin.reload()
        context_lines = 10
        line = 0
        upto = 0
        source_lines = []
        before = during = after = ""
        # Walk line-start offsets; the line containing [start, end] gets
        # split into the escaped before/during/after fragments.
        for num, next in enumerate(linebreak_iter(template_source)):
            if start >= upto and end <= next:
                line = num
                before = escape(template_source[upto:start])
                during = escape(template_source[start:end])
                after = escape(template_source[end:next])
            source_lines.append( (num, escape(template_source[upto:next])) )
            upto = next
        total = len(source_lines)

        top = max(1, line - context_lines)
        bottom = min(total, line + 1 + context_lines)

        self.template_info = {
            'message': self.exc_value.args[0],
            'source_lines': source_lines[top:bottom],
            'before': before,
            'during': during,
            'after': after,
            'top': top,
            'bottom': bottom,
            'total': total,
            'line': line,
            'name': origin.name,
        }

    def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
        """
        Returns context_lines before and after lineno from file.
        Returns (pre_context_lineno, pre_context, context_line, post_context).
        """
        source = None
        # Prefer the module loader (works for zipped eggs etc.); fall back
        # to reading the file from disk.
        if loader is not None and hasattr(loader, "get_source"):
            source = loader.get_source(module_name)
            if source is not None:
                source = source.splitlines()
        if source is None:
            try:
                f = open(filename)
                try:
                    source = f.readlines()
                finally:
                    f.close()
            except (OSError, IOError):
                pass
        if source is None:
            return None, [], None, []

        encoding = 'ascii'
        for line in source[:2]:
            # File coding may be specified. Match pattern from PEP-263
            # (http://www.python.org/dev/peps/pep-0263/)
            match = re.search(r'coding[:=]\s*([-\w.]+)', line)
            if match:
                encoding = match.group(1)
                break
        # Decode leniently: a bad byte must not break error reporting.
        source = [unicode(sline, encoding, 'replace') for sline in source]

        lower_bound = max(0, lineno - context_lines)
        upper_bound = lineno + context_lines

        pre_context = [line.strip('\n') for line in source[lower_bound:lineno]]
        context_line = source[lineno].strip('\n')
        post_context = [line.strip('\n') for line in source[lineno+1:upper_bound]]

        return lower_bound, pre_context, context_line, post_context

    def get_traceback_frames(self):
        """Walk the traceback and return a list of dicts, one per visible
        frame, each with source context and the frame's local variables."""
        frames = []
        tb = self.tb
        while tb is not None:
            # support for __traceback_hide__ which is used by a few libraries
            # to hide internal frames.
            if tb.tb_frame.f_locals.get('__traceback_hide__'):
                tb = tb.tb_next
                continue
            filename = tb.tb_frame.f_code.co_filename
            function = tb.tb_frame.f_code.co_name
            lineno = tb.tb_lineno - 1  # 0-based for _get_lines_from_file
            loader = tb.tb_frame.f_globals.get('__loader__')
            module_name = tb.tb_frame.f_globals.get('__name__')
            pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file(filename, lineno, 7, loader, module_name)
            # Frames whose source cannot be found are silently dropped.
            if pre_context_lineno is not None:
                frames.append({
                    'tb': tb,
                    'filename': filename,
                    'function': function,
                    'lineno': lineno + 1,
                    'vars': tb.tb_frame.f_locals.items(),
                    'id': id(tb),
                    'pre_context': pre_context,
                    'context_line': context_line,
                    'post_context': post_context,
                    'pre_context_lineno': pre_context_lineno + 1,
                })
            tb = tb.tb_next
        return frames

    def format_exception(self):
        """
        Return the same data as from traceback.format_exception.
        """
        import traceback
        frames = self.get_traceback_frames()
        tb = [ (f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames ]
        list = ['Traceback (most recent call last):\n']
        list += traceback.format_list(tb)
        list += traceback.format_exception_only(self.exc_type, self.exc_value)
        return list
def technical_404_response(request, exception):
    "Create a technical 404 error response. The exception should be the Http404."
    try:
        # Resolver-raised Http404s carry the list of URL patterns tried.
        tried = exception.args[0]['tried']
    except (IndexError, TypeError, KeyError):
        tried = []
    else:
        if not tried:
            # tried exists but is an empty list. The URLconf must've been empty.
            return empty_urlconf(request)

    # A request-level urlconf override takes precedence over ROOT_URLCONF.
    urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
    if isinstance(urlconf, types.ModuleType):
        urlconf = urlconf.__name__

    t = Template(TECHNICAL_404_TEMPLATE, name='Technical 404 template')
    c = Context({
        'urlconf': urlconf,
        'root_urlconf': settings.ROOT_URLCONF,
        'request_path': request.path_info[1:], # Trim leading slash
        'urlpatterns': tried,
        'reason': smart_str(exception, errors='replace'),
        'request': request,
        'settings': get_safe_settings(),
    })
    return HttpResponseNotFound(t.render(c), mimetype='text/html')
def empty_urlconf(request):
    "Create an empty URLconf 404 error response."
    # Shown when the project's URLconf has no patterns at all; the project
    # name is taken from the first segment of the settings module path.
    t = Template(EMPTY_URLCONF_TEMPLATE, name='Empty URLConf template')
    c = Context({
        'project_name': settings.SETTINGS_MODULE.split('.')[0]
    })
    return HttpResponse(t.render(c), mimetype='text/html')
#
# Templates are embedded in the file so that we know the error handler will
# always work even if the template loader is broken.
#
TECHNICAL_500_TEMPLATE = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}{% if request %} at {{ request.path_info|escape }}{% endif %}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
code, pre { font-size: 100%; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; }
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
table.vars { margin:5px 0 2px 40px; }
table.vars td, table.req td { font-family:monospace; }
table td.code { width:100%; }
table td.code pre { overflow:hidden; }
table.source th { color:#666; }
table.source td { font-family:monospace; white-space:pre; border-bottom:1px solid #eee; }
ul.traceback { list-style-type:none; }
ul.traceback li.frame { padding-bottom:1em; }
div.context { padding:10px 0; overflow:hidden; }
div.context ol { padding-left:30px; margin:0 10px; list-style-position: inside; }
div.context ol li { font-family:monospace; white-space:pre; color:#666; cursor:pointer; }
div.context ol li pre { display:inline; }
div.context ol.context-line li { color:black; background-color:#ccc; }
div.context ol.context-line li span { position:absolute; right:32px; }
div.commands { margin-left: 40px; }
div.commands a { color:black; text-decoration:none; }
#summary { background: #ffc; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#template, #template-not-exist { background:#f6f6f6; }
#template-not-exist ul { margin: 0 0 0 20px; }
#unicode-hint { background:#eee; }
#traceback { background:#eee; }
#requestinfo { background:#f6f6f6; padding-left:120px; }
#summary table { border:none; background:transparent; }
#requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; }
#requestinfo h3 { margin-bottom:-1em; }
.error { background: #ffc; }
.specific { color:#cc3300; font-weight:bold; }
h2 span.commands { font-size:.7em;}
span.commands a:link {color:#5E5694;}
pre.exception_value { font-family: sans-serif; color: #666; font-size: 1.5em; margin: 10px 0 10px 0; }
</style>
{% if not is_email %}
<script type="text/javascript">
//<!--
function getElementsByClassName(oElm, strTagName, strClassName){
// Written by Jonathan Snook, http://www.snook.ca/jon; Add-ons by Robert Nyman, http://www.robertnyman.com
var arrElements = (strTagName == "*" && document.all)? document.all :
oElm.getElementsByTagName(strTagName);
var arrReturnElements = new Array();
strClassName = strClassName.replace(/\-/g, "\\-");
var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)");
var oElement;
for(var i=0; i<arrElements.length; i++){
oElement = arrElements[i];
if(oRegExp.test(oElement.className)){
arrReturnElements.push(oElement);
}
}
return (arrReturnElements)
}
function hideAll(elems) {
for (var e = 0; e < elems.length; e++) {
elems[e].style.display = 'none';
}
}
window.onload = function() {
hideAll(getElementsByClassName(document, 'table', 'vars'));
hideAll(getElementsByClassName(document, 'ol', 'pre-context'));
hideAll(getElementsByClassName(document, 'ol', 'post-context'));
hideAll(getElementsByClassName(document, 'div', 'pastebin'));
}
function toggle() {
for (var i = 0; i < arguments.length; i++) {
var e = document.getElementById(arguments[i]);
if (e) {
e.style.display = e.style.display == 'none' ? 'block' : 'none';
}
}
return false;
}
function varToggle(link, id) {
toggle('v' + id);
var s = link.getElementsByTagName('span')[0];
var uarr = String.fromCharCode(0x25b6);
var darr = String.fromCharCode(0x25bc);
s.innerHTML = s.innerHTML == uarr ? darr : uarr;
return false;
}
function switchPastebinFriendly(link) {
s1 = "Switch to copy-and-paste view";
s2 = "Switch back to interactive view";
link.innerHTML = link.innerHTML == s1 ? s2 : s1;
toggle('browserTraceback', 'pastebinTraceback');
return false;
}
//-->
</script>
{% endif %}
</head>
<body>
<div id="summary">
<h1>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}{% if request %} at {{ request.path_info|escape }}{% endif %}</h1>
<pre class="exception_value">{% if exception_value %}{{ exception_value|force_escape }}{% else %}No exception supplied{% endif %}</pre>
<table class="meta">
{% if request %}
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
{% endif %}
<tr>
<th>Django Version:</th>
<td>{{ django_version_info }}</td>
</tr>
{% if exception_type %}
<tr>
<th>Exception Type:</th>
<td>{{ exception_type }}</td>
</tr>
{% endif %}
{% if exception_type and exception_value %}
<tr>
<th>Exception Value:</th>
<td><pre>{{ exception_value|force_escape }}</pre></td>
</tr>
{% endif %}
{% if lastframe %}
<tr>
<th>Exception Location:</th>
<td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td>
</tr>
{% endif %}
<tr>
<th>Python Executable:</th>
<td>{{ sys_executable|escape }}</td>
</tr>
<tr>
<th>Python Version:</th>
<td>{{ sys_version_info }}</td>
</tr>
<tr>
<th>Python Path:</th>
<td><pre>{{ sys_path|pprint }}</pre></td>
</tr>
<tr>
<th>Server time:</th>
<td>{{server_time|date:"r"}}</td>
</tr>
</table>
</div>
{% if unicode_hint %}
<div id="unicode-hint">
<h2>Unicode error hint</h2>
<p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|force_escape }}</strong></p>
</div>
{% endif %}
{% if template_does_not_exist %}
<div id="template-not-exist">
<h2>Template-loader postmortem</h2>
{% if loader_debug_info %}
<p>Django tried loading these templates, in this order:</p>
<ul>
{% for loader in loader_debug_info %}
<li>Using loader <code>{{ loader.loader }}</code>:
<ul>{% for t in loader.templates %}<li><code>{{ t.name }}</code> (File {% if t.exists %}exists{% else %}does not exist{% endif %})</li>{% endfor %}</ul>
</li>
{% endfor %}
</ul>
{% else %}
<p>Django couldn't find any templates because your <code>TEMPLATE_LOADERS</code> setting is empty!</p>
{% endif %}
</div>
{% endif %}
{% if template_info %}
<div id="template">
<h2>Template error</h2>
<p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p>
<h3>{{ template_info.message }}</h3>
<table class="source{% if template_info.top %} cut-top{% endif %}{% ifnotequal template_info.bottom template_info.total %} cut-bottom{% endifnotequal %}">
{% for source_line in template_info.source_lines %}
{% ifequal source_line.0 template_info.line %}
<tr class="error"><th>{{ source_line.0 }}</th>
<td>{{ template_info.before }}<span class="specific">{{ template_info.during }}</span>{{ template_info.after }}</td></tr>
{% else %}
<tr><th>{{ source_line.0 }}</th>
<td>{{ source_line.1 }}</td></tr>
{% endifequal %}
{% endfor %}
</table>
</div>
{% endif %}
{% if frames %}
<div id="traceback">
<h2>Traceback <span class="commands">{% if not is_email %}<a href="#" onclick="return switchPastebinFriendly(this);">Switch to copy-and-paste view</a></span>{% endif %}</h2>
{% autoescape off %}
<div id="browserTraceback">
<ul class="traceback">
{% for frame in frames %}
<li class="frame">
<code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code>
{% if frame.context_line %}
<div class="context" id="c{{ frame.id }}">
{% if frame.pre_context and not is_email %}
<ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}">{% for line in frame.pre_context %}<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>{% endfor %}</ol>
{% endif %}
<ol start="{{ frame.lineno }}" class="context-line"><li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ frame.context_line|escape }}</pre>{% if not is_email %} <span>...</span>{% endif %}</li></ol>
{% if frame.post_context and not is_email %}
<ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}">{% for line in frame.post_context %}<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>{% endfor %}</ol>
{% endif %}
</div>
{% endif %}
{% if frame.vars %}
<div class="commands">
{% if is_email %}
<h2>Local Vars</h2>
{% else %}
<a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>▶</span> Local vars</a>
{% endif %}
</div>
<table class="vars" id="v{{ frame.id }}">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in frame.vars|dictsort:"0" %}
<tr>
<td>{{ var.0|force_escape }}</td>
<td class="code"><pre>{{ var.1 }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
{% endautoescape %}
<form action="http://dpaste.com/" name="pasteform" id="pasteform" method="post">
{% if not is_email %}
<div id="pastebinTraceback" class="pastebin">
<input type="hidden" name="language" value="PythonConsole">
<input type="hidden" name="title" value="{{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}">
<input type="hidden" name="source" value="Django Dpaste Agent">
<input type="hidden" name="poster" value="Django">
<textarea name="content" id="traceback_area" cols="140" rows="25">
Environment:
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri|escape }}
{% endif %}
Django Version: {{ django_version_info }}
Python Version: {{ sys_version_info }}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template Loader Error:
{% if loader_debug_info %}Django tried loading these templates, in this order:
{% for loader in loader_debug_info %}Using loader {{ loader.loader }}:
{% for t in loader.templates %}{{ t.name }} (File {% if t.exists %}exists{% else %}does not exist{% endif %})
{% endfor %}{% endfor %}
{% else %}Django couldn't find any templates because your TEMPLATE_LOADERS setting is empty!
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}
Traceback:
{% for frame in frames %}File "{{ frame.filename|escape }}" in {{ frame.function|escape }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line|escape }}{% endif %}
{% endfor %}
Exception Type: {{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}
Exception Value: {{ exception_value|force_escape }}
</textarea>
<br><br>
<input type="submit" value="Share this traceback on a public Web site">
</div>
</form>
</div>
{% endif %}
{% endif %}
<div id="requestinfo">
<h2>Request information</h2>
{% if request %}
<h3 id="get-info">GET</h3>
{% if request.GET %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.GET.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No GET data</p>
{% endif %}
<h3 id="post-info">POST</h3>
{% if request.POST %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.POST.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No POST data</p>
{% endif %}
<h3 id="files-info">FILES</h3>
{% if request.FILES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.FILES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No FILES data</p>
{% endif %}
<h3 id="cookie-info">COOKIES</h3>
{% if request.COOKIES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.COOKIES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No cookie data</p>
{% endif %}
<h3 id="meta-info">META</h3>
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.META.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>Request data not supplied</p>
{% endif %}
<h3 id="settings-info">Settings</h3>
<h4>Using settings module <code>{{ settings.SETTINGS_MODULE }}</code></h4>
<table class="req">
<thead>
<tr>
<th>Setting</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in settings.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% if not is_email %}
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in your
Django settings file. Change that to <code>False</code>, and Django will
display a standard 500 page.
</p>
</div>
{% endif %}
</body>
</html>
"""
TECHNICAL_404_TEMPLATE = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Page not found at {{ request.path_info|escape }}</title>
<meta name="robots" content="NONE,NOARCHIVE">
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
table { border:none; border-collapse: collapse; width:100%; }
td, th { vertical-align:top; padding:2px 3px; }
th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#info { background:#f6f6f6; }
#info ol { margin: 0.5em 4em; }
#info ol li { font-family: monospace; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>Page not found <span>(404)</span></h1>
<table class="meta">
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
</table>
</div>
<div id="info">
{% if urlpatterns %}
<p>
Using the URLconf defined in <code>{{ urlconf }}</code>,
Django tried these URL patterns, in this order:
</p>
<ol>
{% for pattern in urlpatterns %}
<li>
{% for pat in pattern %}
{{ pat.regex.pattern }}
{% if forloop.last and pat.name %}[name='{{ pat.name }}']{% endif %}
{% endfor %}
</li>
{% endfor %}
</ol>
<p>The current URL, <code>{{ request_path|escape }}</code>, didn't match any of these.</p>
{% else %}
<p>{{ reason }}</p>
{% endif %}
</div>
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in
your Django settings file. Change that to <code>False</code>, and Django
will display a standard 404 page.
</p>
</div>
</body>
</html>
"""
EMPTY_URLCONF_TEMPLATE = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html lang="en"><head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE"><title>Welcome to Django</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; }
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
ul { margin-left: 2em; margin-top: 1em; }
#summary { background: #e0ebff; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#instructions { background:#f6f6f6; }
#summary table { border:none; background:transparent; }
</style>
</head>
<body>
<div id="summary">
<h1>It worked!</h1>
<h2>Congratulations on your first Django-powered page.</h2>
</div>
<div id="instructions">
<p>Of course, you haven't actually done any work yet. Here's what to do next:</p>
<ul>
<li>If you plan to use a database, edit the <code>DATABASES</code> setting in <code>{{ project_name }}/settings.py</code>.</li>
<li>Start your first app by running <code>python {{ project_name }}/manage.py startapp [appname]</code>.</li>
</ul>
</div>
<div id="explanation">
<p>
You're seeing this message because you have <code>DEBUG = True</code> in your
Django settings file and you haven't configured any URLs. Get to work!
</p>
</div>
</body></html>
"""
| bsd-3-clause |
chrsrds/scikit-learn | sklearn/utils/tests/test_murmurhash.py | 21 | 2637 | # Author: Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.murmurhash import murmurhash3_32
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
def test_mmhash3_int():
    """Hashing a plain int: seed handling and signed/unsigned variants."""
    # Default call (seed=0, signed output).
    assert murmurhash3_32(3) == 847579505
    assert murmurhash3_32(3, positive=False) == 847579505
    assert murmurhash3_32(3, positive=True) == 847579505

    # (seed, signed expectation, unsigned expectation) triples.
    expectations = [
        (0, 847579505, 847579505),
        (42, -1823081949, 2471885347),
    ]
    for seed, signed, unsigned in expectations:
        assert murmurhash3_32(3, seed=seed) == signed
        assert murmurhash3_32(3, seed=seed, positive=False) == signed
        assert murmurhash3_32(3, seed=seed, positive=True) == unsigned
def test_mmhash3_int_array():
    """Vectorized hashing of an int32 array must match element-wise hashing."""
    rng = np.random.RandomState(42)
    keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
    keys = keys.reshape((3, 2, 1))

    for seed in (0, 42):
        # Signed variant: reference computed by hashing each key as a
        # Python int, reshaped back to the input's shape.
        reference = np.array([murmurhash3_32(int(key), seed)
                              for key in keys.flat]).reshape(keys.shape)
        assert_array_equal(murmurhash3_32(keys, seed), reference)

        # Unsigned (positive=True) variant.
        reference = np.array([murmurhash3_32(key, seed, positive=True)
                              for key in keys.flat]).reshape(keys.shape)
        assert_array_equal(murmurhash3_32(keys, seed, positive=True),
                           reference)
def test_mmhash3_bytes():
    """Known hash values for a bytes key, signed and unsigned."""
    cases = [
        (0, -156908512, 4138058784),
        (42, -1322301282, 2972666014),
    ]
    for seed, signed, unsigned in cases:
        assert murmurhash3_32(b'foo', seed) == signed
        assert murmurhash3_32(b'foo', seed, positive=True) == unsigned
def test_mmhash3_unicode():
    """A str key must hash identically to the equivalent bytes key."""
    cases = [
        (0, -156908512, 4138058784),
        (42, -1322301282, 2972666014),
    ]
    for seed, signed, unsigned in cases:
        assert murmurhash3_32('foo', seed) == signed
        assert murmurhash3_32('foo', seed, positive=True) == unsigned
def test_no_collision_on_byte_range():
    """Strings of 0..99 spaces must all hash to distinct values.

    Bug fix: the original loop never added hashes to ``previous_hashes``,
    so the set stayed empty and the membership assertion could never fire.
    """
    previous_hashes = set()
    for i in range(100):
        h = murmurhash3_32(' ' * i, 0)
        assert h not in previous_hashes, \
            "Found collision on growing empty string"
        previous_hashes.add(h)  # record the hash so later iterations compare against it
def test_uniform_distribution():
    """Hashed consecutive integers should spread ~evenly over buckets."""
    n_bins, n_samples = 10, 100000
    counts = np.zeros(n_bins, dtype=np.float64)
    for value in range(n_samples):
        counts[murmurhash3_32(value, positive=True) % n_bins] += 1
    observed = counts / n_samples
    uniform = np.full(n_bins, 1. / n_bins)
    # Each bucket frequency within ~1% of the uniform expectation.
    assert_array_almost_equal(observed / uniform, np.ones(n_bins), 2)
| bsd-3-clause |
hasteur/pywikibot_minimal | userinterfaces/wxpython_interface.py | 5 | 1983 | # -*- coding: utf-8 -*-
__version__ = '$Id: 139dc689ef6baf1ed224d300766f7b7ea658cb0f $'
import sys; sys.path.append('..')
import re
import terminal_interface
import wx
app = wx.App()
class UI(terminal_interface.UI):
    """Terminal UI variant that asks questions through wxPython dialogs."""

    def __init__(self):
        pass

    def input(self, question, password=False):
        """
        Works like raw_input(), but returns a unicode string instead of ASCII.

        Unlike raw_input, this function automatically adds a space after the
        question.  When `password` is True a masked entry dialog is used.
        """
        self.output(question)
        if password:
            dialog = wx.PasswordEntryDialog(None, question, '', '')
        else:
            dialog = wx.TextEntryDialog(None, question, '', '')
        dialog.ShowModal()
        answer = dialog.GetValue() or ''
        # Bug fix: echo the entered text.  The previous code concatenated the
        # dialog *object* itself with '\n', which raised a TypeError.
        self.output(answer + '\n')
        return answer

    def inputChoice(self, question, options, hotkeys, default=None):
        """Ask the user to pick one of `options`; the hotkey letter of each
        option is shown in brackets (e.g. "[y]es").  Loops until a valid
        hotkey is entered, or returns `default` on empty input."""
        for i in range(len(options)):
            option = options[i]
            hotkey = hotkeys[i]
            # Highlight the hotkey letter inside the option text, or append
            # it in brackets if the letter does not occur in the option.
            m = re.search('[%s%s]' % (hotkey.lower(), hotkey.upper()), option)
            if m:
                pos = m.start()
                options[i] = '%s[%s]%s' % (option[:pos], option[pos], option[pos+1:])
            else:
                options[i] = '%s [%s]' % (option, hotkey)
        while True:
            prompt = '%s\n(%s)' % (question, ', '.join(options))
            self.output('%s (%s)' % (question, ', '.join(options)))
            answer = wx.TextEntryDialog(None, prompt, question, '')
            answer.ShowModal()
            answer = answer.GetValue()
            self.output(answer + '\n')
            if answer.lower() in hotkeys or answer.upper() in hotkeys:
                return answer
            elif default and answer == '':  # empty string entered
                return default
if __name__ == '__main__':
    # Manual smoke test: pop up one wx input dialog, echo the reply to the
    # terminal (Python 2 print statement), then hand control to the wx loop.
    ui = UI()
    print ui.input('Test?')
    app.MainLoop()
| gpl-3.0 |
nagyistoce/photivo | scons-local-2.2.0/SCons/Sig.py | 14 | 2392 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Sig.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
__doc__ = """Place-holder for the old SCons.Sig module hierarchy
This is no longer used, but code out there (such as the NSIS module on
the SCons wiki) may try to import SCons.Sig. If so, we generate a warning
that points them to the line that caused the import, and don't die.
If someone actually tried to use the sub-modules or functions within
the package (for example, SCons.Sig.MD5.signature()), then they'll still
get an AttributeError, but at least they'll know where to start looking.
"""
import SCons.Util
import SCons.Warnings
# Warn once at import time; the SCons warning framework points the user at
# the "import SCons.Sig" statement that pulled this placeholder module in.
msg = 'The SCons.Sig module no longer exists.\n' \
      ' Remove the following "import SCons.Sig" line to eliminate this warning:'

SCons.Warnings.warn(SCons.Warnings.DeprecatedSigModuleWarning, msg)

# Place-holders for attributes the old SCons.Sig module used to export.
default_calc = None
default_module = None

class MD5Null(SCons.Util.Null):
    """Null object standing in for the old MD5 signature sub-module."""
    def __repr__(self):
        return "MD5Null()"

class TimeStampNull(SCons.Util.Null):
    """Null object standing in for the old TimeStamp signature sub-module."""
    def __repr__(self):
        return "TimeStampNull()"

# SCons.Util.Null swallows any attribute/method access, so legacy code that
# touches SCons.Sig.MD5 / SCons.Sig.TimeStamp keeps running (until it calls
# something concrete, when it gets an AttributeError as documented above).
MD5 = MD5Null()
TimeStamp = TimeStampNull()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 |
nortikin/sverchok | old_nodes/formula3.py | 2 | 6928 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
from math import *
from collections import defaultdict
import bpy
from bpy.props import BoolProperty, StringProperty, EnumProperty, FloatVectorProperty, IntProperty
import json
import io
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, match_long_repeat, zip_long_repeat, throttle_and_update_node
from sverchok.utils import logging
from sverchok.utils.modules.eval_formula import get_variables, safe_eval
class SvFormulaNodeMk3(bpy.types.Node, SverchCustomTreeNode):
    """
    Triggers: Formula
    Tooltip: Calculate by custom formula.
    """
    bl_idname = 'SvFormulaNodeMk3'
    bl_label = 'Formula'
    bl_icon = 'OUTLINER_OB_EMPTY'
    sv_icon = 'SV_FORMULA'

    # This old node is superseded; migration target for the tree updater.
    replacement_nodes = [('SvFormulaNodeMk5', None, None)]

    @throttle_and_update_node
    def on_update(self, context):
        # A formula string changed: re-derive the input sockets from the
        # variables the formulas now reference.
        self.adjust_sockets()

    @throttle_and_update_node
    def on_update_dims(self, context):
        # Shrinking `dimensions` clears the formulas that are no longer shown,
        # so their variables stop contributing input sockets.
        if self.dimensions < 4:
            self.formula4 = ""
        if self.dimensions < 3:
            self.formula3 = ""
        if self.dimensions < 2:
            self.formula2 = ""
        self.adjust_sockets()

    # Number of formula slots (1..4) exposed in the UI.
    dimensions : IntProperty(name="Dimensions", default=1, min=1, max=4, update=on_update_dims)

    formula1 : StringProperty(default="x+y", update=on_update)
    formula2 : StringProperty(update=on_update)
    formula3 : StringProperty(update=on_update)
    formula4 : StringProperty(update=on_update)

    # separate: keep each evaluated vector as its own nested list instead of
    # flattening the per-formula values into one list.
    separate : BoolProperty(name="Separate", default=False, update=updateNode)
    # wrap: add one extra list level around the whole result.
    wrap : BoolProperty(name="Wrap", default=False, update=updateNode)

    def formulas(self):
        """Return all four formula strings (unused slots are empty strings)."""
        return [self.formula1, self.formula2, self.formula3, self.formula4]

    def formula(self, k):
        """Return the k-th (0-based) formula string."""
        return self.formulas()[k]

    def draw_buttons(self, context, layout):
        # Show only as many formula fields as `dimensions` allows.
        layout.prop(self, "formula1", text="")
        if self.dimensions > 1:
            layout.prop(self, "formula2", text="")
        if self.dimensions > 2:
            layout.prop(self, "formula3", text="")
        if self.dimensions > 3:
            layout.prop(self, "formula4", text="")
        row = layout.row()
        row.prop(self, "separate")
        row.prop(self, "wrap")

    def draw_buttons_ext(self, context, layout):
        # Sidebar panel additionally exposes the dimensions selector.
        layout.prop(self, "dimensions")
        self.draw_buttons(context, layout)

    def sv_init(self, context):
        self.inputs.new('SvStringsSocket', "x")
        self.outputs.new('SvStringsSocket', "Result")

    def get_variables(self):
        """Collect the sorted union of variable names used by all formulas."""
        variables = set()
        for formula in self.formulas():
            vs = get_variables(formula)
            variables.update(vs)
        return list(sorted(list(variables)))

    def adjust_sockets(self):
        # Keep input sockets in sync with the variables referenced by the
        # formulas: drop sockets for vanished variables, add sockets for new
        # ones.  Existing sockets (and their links) are left untouched.
        variables = self.get_variables()
        #self.debug("adjust_sockets:" + str(variables))
        #self.debug("inputs:" + str(self.inputs.keys()))
        for key in self.inputs.keys():
            if (key not in variables) and (key in self.inputs):
                self.debug("Input {} not in variables {}, remove it".format(key, str(variables)))
                self.inputs.remove(self.inputs[key])
        for v in variables:
            if v not in self.inputs:
                self.debug("Variable {} not in inputs {}, add it".format(v, str(self.inputs.keys())))
                self.inputs.new('SvStringsSocket', v)

    def sv_update(self):
        '''
        update analyzes the state of the node and returns if the criteria to start processing
        are not met.
        '''
        if not any(len(formula) for formula in self.formulas()):
            return
        self.adjust_sockets()

    def get_input(self):
        """Gather data from linked input sockets, keyed by variable name.

        Unlinked variables are simply absent from the returned dict; process()
        substitutes a default for them.
        """
        variables = self.get_variables()
        inputs = {}
        for var in variables:
            if var in self.inputs and self.inputs[var].is_linked:
                inputs[var] = self.inputs[var].sv_get()
        return inputs

    def migrate_from(self, old_node):
        # Import the formula from the legacy Formula2Node.
        if old_node.bl_idname == 'Formula2Node':
            formula = old_node.formula
            # Older formula node allowed only fixed set of
            # variables, with names "x", "n[0]" .. "n[100]".
            # Other names could not be considered valid.
            k = -1
            for socket in old_node.inputs:
                name = socket.name
                if k == -1:  # First socket name was "x"
                    new_name = name
                else:
                    # Other names was "n[k]", which is syntactically not
                    # a valid python variable name.
                    # So we replace all occurences of "n[0]" in formula
                    # with "n0", and so on.
                    new_name = "n" + str(k)
                logging.info("Replacing %s with %s", name, new_name)
                formula = formula.replace(name, new_name)
                k += 1
            self.formula1 = formula
            self.wrap = True

    def process(self):
        if not self.outputs[0].is_linked:
            return
        var_names = self.get_variables()
        inputs = self.get_input()
        results = []
        if var_names:
            # Substitute [[0]] for variables without a linked socket, then
            # repeat shorter inputs so all parameters have equal length.
            input_values = [inputs.get(name, [[0]]) for name in var_names]
            parameters = match_long_repeat(input_values)
        else:
            # No variables at all: evaluate the formulas exactly once.
            parameters = [[[None]]]
        for objects in zip(*parameters):
            object_results = []
            for values in zip_long_repeat(*objects):
                variables = dict(zip(var_names, values))
                vector = []
                for formula in self.formulas():
                    if formula:
                        value = safe_eval(formula, variables)
                        vector.append(value)
                if self.separate:
                    object_results.append(vector)
                else:
                    object_results.extend(vector)
            results.append(object_results)
        if self.wrap:
            results = [results]
        self.outputs['Result'].sv_set(results)
def register():
    """Make the Formula node class available to Blender node trees."""
    bpy.utils.register_class(SvFormulaNodeMk3)


def unregister():
    """Remove the Formula node class from Blender's registry."""
    bpy.utils.unregister_class(SvFormulaNodeMk3)
| gpl-3.0 |
lebabouin/CouchPotatoServer-develop | libs/apscheduler/events.py | 144 | 2529 | __all__ = ('EVENT_SCHEDULER_START', 'EVENT_SCHEDULER_SHUTDOWN',
'EVENT_JOBSTORE_ADDED', 'EVENT_JOBSTORE_REMOVED',
'EVENT_JOBSTORE_JOB_ADDED', 'EVENT_JOBSTORE_JOB_REMOVED',
'EVENT_JOB_EXECUTED', 'EVENT_JOB_ERROR', 'EVENT_JOB_MISSED',
'EVENT_ALL', 'SchedulerEvent', 'JobStoreEvent', 'JobEvent')
EVENT_SCHEDULER_START = 1 # The scheduler was started
EVENT_SCHEDULER_SHUTDOWN = 2 # The scheduler was shut down
EVENT_JOBSTORE_ADDED = 4 # A job store was added to the scheduler
EVENT_JOBSTORE_REMOVED = 8 # A job store was removed from the scheduler
EVENT_JOBSTORE_JOB_ADDED = 16 # A job was added to a job store
EVENT_JOBSTORE_JOB_REMOVED = 32 # A job was removed from a job store
EVENT_JOB_EXECUTED = 64 # A job was executed successfully
EVENT_JOB_ERROR = 128 # A job raised an exception during execution
EVENT_JOB_MISSED = 256 # A job's execution was missed
EVENT_ALL = (EVENT_SCHEDULER_START | EVENT_SCHEDULER_SHUTDOWN |
EVENT_JOBSTORE_ADDED | EVENT_JOBSTORE_REMOVED |
EVENT_JOBSTORE_JOB_ADDED | EVENT_JOBSTORE_JOB_REMOVED |
EVENT_JOB_EXECUTED | EVENT_JOB_ERROR | EVENT_JOB_MISSED)
class SchedulerEvent(object):
    """Base class for all scheduler notifications.

    :var code: the type code of this event (one of the EVENT_* constants)
    """

    def __init__(self, code):
        self.code = code


class JobStoreEvent(SchedulerEvent):
    """Notification about a job store.

    :var alias: the alias of the job store involved
    :var job: the new job, set only when a job was added
    """

    def __init__(self, code, alias, job=None):
        super(JobStoreEvent, self).__init__(code)
        self.alias = alias
        # The `job` attribute only exists on "job added" events.
        if job:
            self.job = job


class JobEvent(SchedulerEvent):
    """Notification about the execution of an individual job.

    :var job: the job instance in question
    :var scheduled_run_time: the time when the job was scheduled to be run
    :var retval: the return value of the successfully executed job
    :var exception: the exception raised by the job, if any
    :var traceback: the traceback object associated with the exception
    """

    def __init__(self, code, job, scheduled_run_time, retval=None,
                 exception=None, traceback=None):
        super(JobEvent, self).__init__(code)
        self.job = job
        self.scheduled_run_time = scheduled_run_time
        self.retval = retval
        self.exception = exception
        self.traceback = traceback
| gpl-3.0 |
gandelman-a/neutron-lbaas | neutron_lbaas/db/migration/alembic_migrations/versions/lbaasv2_tls.py | 2 | 1818 | # Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""lbaasv2 TLS
Revision ID: lbaasv2_tls
Revises: 364f9b6064f0
Create Date: 2015-01-18 10:00:00
"""
# revision identifiers, used by Alembic.
revision = 'lbaasv2_tls'
down_revision = '364f9b6064f0'
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
# Enum definitions for the lbaas_listeners.protocol column; the migration
# extends the old set with TERMINATED_HTTPS.  NOTE(review): only the "new"
# enum is used by upgrade() in this chunk -- presumably the "old" one serves
# the downgrade path; confirm against the rest of the file.
old_listener_protocols = sa.Enum("HTTP", "HTTPS", "TCP",
                                 name="listener_protocolsv2")
new_listener_protocols = sa.Enum("HTTP", "HTTPS", "TCP", "TERMINATED_HTTPS",
                                 name="listener_protocolsv2")
def upgrade():
    """Apply the lbaasv2 TLS schema changes.

    Extends the listener protocol enum with TERMINATED_HTTPS, creates the
    lbaas_sni mapping table, and adds the default TLS container column.
    """
    # Replace the protocol enum in place with the variant that includes
    # TERMINATED_HTTPS.
    migration.alter_enum('lbaas_listeners', 'protocol', new_listener_protocols,
                         nullable=False)
    # listener -> ordered TLS (SNI) certificate container references;
    # `position` orders the containers for a listener.
    op.create_table(
        u'lbaas_sni',
        sa.Column(u'listener_id', sa.String(36), nullable=False),
        sa.Column(u'tls_container_id', sa.String(128), nullable=False),
        sa.Column(u'position', sa.Integer),
        sa.ForeignKeyConstraint(['listener_id'], [u'lbaas_listeners.id'], ),
        sa.PrimaryKeyConstraint(u'listener_id', u'tls_container_id')
    )
    # Nullable: existing listeners have no default certificate container.
    op.add_column('lbaas_listeners',
                  sa.Column(u'default_tls_container_id', sa.String(128),
                            nullable=True))
| apache-2.0 |
Jumpscale/web | pythonlib/gdata/tlslite/integration/ClientHelper.py | 285 | 7021 | """
A helper class for using TLS Lite with stdlib clients
(httplib, xmlrpclib, imaplib, poplib).
"""
from gdata.tlslite.Checker import Checker
class ClientHelper:
"""This is a helper class used to integrate TLS Lite with various
TLS clients (e.g. poplib, smtplib, httplib, etc.)"""
def __init__(self,
username=None, password=None, sharedKey=None,
certChain=None, privateKey=None,
cryptoID=None, protocol=None,
x509Fingerprint=None,
x509TrustList=None, x509CommonName=None,
settings = None):
"""
For client authentication, use one of these argument
combinations:
- username, password (SRP)
- username, sharedKey (shared-key)
- certChain, privateKey (certificate)
For server authentication, you can either rely on the
implicit mutual authentication performed by SRP or
shared-keys, or you can do certificate-based server
authentication with one of these argument combinations:
- cryptoID[, protocol] (requires cryptoIDlib)
- x509Fingerprint
- x509TrustList[, x509CommonName] (requires cryptlib_py)
Certificate-based server authentication is compatible with
SRP or certificate-based client authentication. It is
not compatible with shared-keys.
The constructor does not perform the TLS handshake itself, but
simply stores these arguments for later. The handshake is
performed only when this class needs to connect with the
server. Then you should be prepared to handle TLS-specific
exceptions. See the client handshake functions in
L{tlslite.TLSConnection.TLSConnection} for details on which
exceptions might be raised.
@type username: str
@param username: SRP or shared-key username. Requires the
'password' or 'sharedKey' argument.
@type password: str
@param password: SRP password for mutual authentication.
Requires the 'username' argument.
@type sharedKey: str
@param sharedKey: Shared key for mutual authentication.
Requires the 'username' argument.
@type certChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@param certChain: Certificate chain for client authentication.
Requires the 'privateKey' argument. Excludes the SRP or
shared-key related arguments.
@type privateKey: L{tlslite.utils.RSAKey.RSAKey}
@param privateKey: Private key for client authentication.
Requires the 'certChain' argument. Excludes the SRP or
shared-key related arguments.
@type cryptoID: str
@param cryptoID: cryptoID for server authentication. Mutually
exclusive with the 'x509...' arguments.
@type protocol: str
@param protocol: cryptoID protocol URI for server
authentication. Requires the 'cryptoID' argument.
@type x509Fingerprint: str
@param x509Fingerprint: Hex-encoded X.509 fingerprint for
server authentication. Mutually exclusive with the 'cryptoID'
and 'x509TrustList' arguments.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
other party must present a certificate chain which extends to
one of these root certificates. The cryptlib_py module must be
installed to use this parameter. Mutually exclusive with the
'cryptoID' and 'x509Fingerprint' arguments.
@type x509CommonName: str
@param x509CommonName: The end-entity certificate's 'CN' field
must match this value. For a web server, this is typically a
server name such as 'www.amazon.com'. Mutually exclusive with
the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
'x509TrustList' argument.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
"""
self.username = None
self.password = None
self.sharedKey = None
self.certChain = None
self.privateKey = None
self.checker = None
#SRP Authentication
if username and password and not \
(sharedKey or certChain or privateKey):
self.username = username
self.password = password
#Shared Key Authentication
elif username and sharedKey and not \
(password or certChain or privateKey):
self.username = username
self.sharedKey = sharedKey
#Certificate Chain Authentication
elif certChain and privateKey and not \
(username or password or sharedKey):
self.certChain = certChain
self.privateKey = privateKey
#No Authentication
elif not password and not username and not \
sharedKey and not certChain and not privateKey:
pass
else:
raise ValueError("Bad parameters")
#Authenticate the server based on its cryptoID or fingerprint
if sharedKey and (cryptoID or protocol or x509Fingerprint):
raise ValueError("Can't use shared keys with other forms of"\
"authentication")
self.checker = Checker(cryptoID, protocol, x509Fingerprint,
x509TrustList, x509CommonName)
self.settings = settings
self.tlsSession = None
def _handshake(self, tlsConnection):
if self.username and self.password:
tlsConnection.handshakeClientSRP(username=self.username,
password=self.password,
checker=self.checker,
settings=self.settings,
session=self.tlsSession)
elif self.username and self.sharedKey:
tlsConnection.handshakeClientSharedKey(username=self.username,
sharedKey=self.sharedKey,
settings=self.settings)
else:
tlsConnection.handshakeClientCert(certChain=self.certChain,
privateKey=self.privateKey,
checker=self.checker,
settings=self.settings,
session=self.tlsSession)
self.tlsSession = tlsConnection.session
| apache-2.0 |
BlueLens/bl-magi | tensorflow/object_detection/core/data_decoder.py | 23 | 1349 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Interface for data decoders.
Data decoders decode the input data and return a dictionary of tensors keyed by
the entries in core.reader.Fields.
"""
from abc import ABCMeta
from abc import abstractmethod
class DataDecoder(ABCMeta('_DataDecoderBase', (object,), {})):
  """Interface for data decoders.

  Subclasses must implement `decode`.  The abstract-base machinery is wired
  up by instantiating ABCMeta directly, which works on both Python 2 and
  Python 3.  The previous `__metaclass__ = ABCMeta` class attribute is
  silently ignored on Python 3, which left `decode` unenforced there.
  """

  @abstractmethod
  def decode(self, data):
    """Return a single image and associated labels.

    Args:
      data: a string tensor holding a serialized protocol buffer corresponding
        to data for a single image.

    Returns:
      tensor_dict: a dictionary containing tensors. Possible keys are defined in
        reader.Fields.
    """
    pass
| apache-2.0 |
CPFDSoftware-Tony/gmv | utils/Mesa/Mesa-7.8.2/src/mesa/glapi/gen/glX_proto_common.py | 46 | 3187 | #!/usr/bin/env python
# (C) Copyright IBM Corporation 2004, 2005
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Ian Romanick <idr@us.ibm.com>
import gl_XML, glX_XML
import string
class glx_proto_item_factory(glX_XML.glx_item_factory):
    """Factory to create GLX protocol oriented objects derived from gl_item."""

    def create_item(self, name, element, context):
        # Only "type" elements get the protocol-aware subclass; defer
        # everything else to the generic glX factory.
        if name != "type":
            return glX_XML.glx_item_factory.create_item(self, name, element, context)
        return glx_proto_type(element, context)
class glx_proto_type(gl_XML.gl_type):
    """GL type augmented with the name it carries on the GLX wire protocol."""

    def __init__(self, element, context):
        gl_XML.gl_type.__init__(self, element, context)
        # Optional override of the type name used in protocol structs;
        # None when the XML element carries no glx_name property.
        self.glx_name = element.nsProp("glx_name", None)
class glx_print_proto(gl_XML.gl_print_base):
    # Shared helpers for generators that emit GLX protocol marshalling C code.
    # NOTE: this is Python 2 code (print statements, string.join).

    def size_call(self, func, outputs_also = 0):
        """Create C code to calculate 'compsize'.

        Creates code to calculate 'compsize'. If the function does
        not need 'compsize' to be calculated, None will be
        returned."""

        compsize = None

        for param in func.parameterIterator():
            if outputs_also or not param.is_output:
                if param.is_image():
                    [dim, w, h, d, junk] = param.get_dimensions()
                    compsize = '__glImageSize(%s, %s, %s, %s, %s, %s)' % (w, h, d, param.img_format, param.img_type, param.img_target)
                    if not param.img_send_null:
                        # Guard against NULL image pointers: they contribute 0 bytes.
                        compsize = '(%s != NULL) ? %s : 0' % (param.name, compsize)

                    # An image parameter fully determines the size expression;
                    # no later parameter is consulted.
                    return compsize
                elif len(param.count_parameter_list):
                    parameters = string.join( param.count_parameter_list, "," )
                    compsize = "__gl%s_size(%s)" % (func.name, parameters)

                    return compsize

        return None

    def emit_packet_size_calculation(self, f, bias):
        # Emits C declarations for `compsize` (if needed) and `cmdlen`,
        # the on-the-wire command length, to stdout.  `bias` subtracts a
        # fixed amount from the command length when non-zero.
        # compsize is only used in the command size calculation if
        # the function has a non-output parameter that has a non-empty
        # counter_parameter_list.
        compsize = self.size_call(f)
        if compsize:
            print ' const GLuint compsize = %s;' % (compsize)

        if bias:
            print ' const GLuint cmdlen = %s - %u;' % (f.command_length(), bias)
        else:
            print ' const GLuint cmdlen = %s;' % (f.command_length())
        #print ''
        return compsize
| gpl-3.0 |
jjlee3/openthread | tools/harness-automation/cases/sed_6_2_1.py | 16 | 1869 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class SED_6_2_1(HarnessCase):
    # Harness certification case "6 2 1" with the DUT acting as a
    # Sleepy End Device (SED).
    role = HarnessCase.ROLE_SED
    case = '6 2 1'
    # Number of reference (golden) devices this topology requires.
    golden_devices_required = 2

    def on_dialog(self, dialog, title):
        # No harness dialogs need special handling for this case.
        pass
if __name__ == '__main__':
    # Allow running this single harness case directly.
    unittest.main()
| bsd-3-clause |
pombreda/pydbgr | test/unit/test-lib-pp.py | 2 | 1146 | #!/usr/bin/env python
'Unit test for trepan.lib.pp'
import sys, unittest
from import_relative import import_relative
Mpp = import_relative('lib.pp', '...trepan')
class TestLibPrint(unittest.TestCase):
    """Tests for trepan.lib.pp's simple-array pretty printer."""

    def setUp(self):
        # Collected output lines produced via the msg/msg_nocr callbacks.
        self.msgs = []
        return

    def msg_nocr(self, msg):
        # Append to the current (last) output line without a newline.
        # NOTE(review): when self.msgs is empty, `self.msgs += msg` extends
        # the list with the individual *characters* of msg rather than
        # appending msg as one element; presumably `self.msgs = [msg]` was
        # intended -- confirm against pprint_simple_array's call order
        # before changing.
        if len(self.msgs) > 0:
            self.msgs[-1] += msg
        else:
            self.msgs += msg
            pass
        return

    def msg(self, msg):
        # Start a new output line.
        self.msgs += [msg]
        return

    def test_lib_pprint_simple_array(self):
        # (Removed two inner `msg_nocr`/`msg` helper functions that were
        # defined here but never used; the bound methods above are what is
        # actually passed to pprint_simple_array.)
        Mpp.pprint_simple_array(list(range(50)), 50, self.msg_nocr, self.msg)
        self.assertEqual(
            ['[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11', '',
             ' 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23',
             ' 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35',
             ' 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47',
             ' 48, 49]'],
            self.msgs)
        return
    pass
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| gpl-3.0 |
wbc2010/django1.2.5 | django/contrib/gis/tests/relatedapp/models.py | 274 | 1686 | from django.contrib.gis.db import models
from django.contrib.localflavor.us.models import USStateField
class Location(models.Model):
    # A bare geographic point; other models reference it via FK.
    point = models.PointField()

    objects = models.GeoManager()  # enables spatial queryset lookups

    def __unicode__(self): return self.point.wkt
class City(models.Model):
    name = models.CharField(max_length=50)
    state = USStateField()  # two-letter US state field from localflavor
    location = models.ForeignKey(Location)

    objects = models.GeoManager()

    def __unicode__(self): return self.name
class AugmentedLocation(Location):
    # Model inheritance from Location, adding free-form text.
    extra_text = models.TextField(blank=True)

    objects = models.GeoManager()
class DirectoryEntry(models.Model):
    listing_text = models.CharField(max_length=50)
    # Points at the inherited model to exercise FK-to-subclass queries.
    location = models.ForeignKey(AugmentedLocation)

    objects = models.GeoManager()
class Parcel(models.Model):
    name = models.CharField(max_length=30)
    city = models.ForeignKey(City)
    center1 = models.PointField()
    # Throwing a curveball w/`db_column` here.
    center2 = models.PointField(srid=2276, db_column='mycenter')
    border1 = models.PolygonField()
    border2 = models.PolygonField(srid=2276)  # non-default SRID

    objects = models.GeoManager()

    def __unicode__(self): return self.name
# These use the GeoManager but do not have any geographic fields.
class Author(models.Model):
    name = models.CharField(max_length=100)
    objects = models.GeoManager()
class Article(models.Model):
    title = models.CharField(max_length=100)
    # unique FK: effectively one article per author
    author = models.ForeignKey(Author, unique=True)
    objects = models.GeoManager()
class Book(models.Model):
    title = models.CharField(max_length=100)
    # nullable: a book may have no author; reverse accessor is `books`
    author = models.ForeignKey(Author, related_name='books', null=True)
    objects = models.GeoManager()
| bsd-3-clause |
ffsdmad/flask | flask/sessions.py | 142 | 14369 | # -*- coding: utf-8 -*-
"""
flask.sessions
~~~~~~~~~~~~~~
Implements cookie based sessions based on itsdangerous.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import uuid
import hashlib
from base64 import b64encode, b64decode
from datetime import datetime
from werkzeug.http import http_date, parse_date
from werkzeug.datastructures import CallbackDict
from . import Markup, json
from ._compat import iteritems, text_type
from itsdangerous import URLSafeTimedSerializer, BadSignature
def total_seconds(td):
    """Return the number of whole seconds in *td* (microseconds ignored)."""
    day_seconds = td.days * 24 * 60 * 60
    return day_seconds + td.seconds
class SessionMixin(object):
    """Expands a basic dictionary with the accessors that Flask
    extensions and users expect on the session object.
    """

    @property
    def permanent(self):
        """Reflects the ``'_permanent'`` key in the dict."""
        return self.get('_permanent', False)

    @permanent.setter
    def permanent(self, value):
        # Always store a real bool, whatever truthy/falsy value was given.
        self['_permanent'] = bool(value)

    #: some session backends can tell you if a session is new, but that is
    #: not necessarily guaranteed. Use with caution. The default mixin
    #: implementation just hardcodes ``False`` in.
    new = False

    #: for some backends this will always be ``True``, but some backends will
    #: default this to false and detect changes in the dictionary for as
    #: long as changes do not happen on mutable structures in the session.
    #: The default mixin implementation just hardcodes ``True`` in.
    modified = True
def _tag(value):
    # Recursively wrap values that plain JSON cannot represent in
    # single-key marker dicts (' t', ' u', ' b', ' m', ' d'), matching
    # what TaggedJSONSerializer.loads revives.  Branch order matters:
    # bytes is tested before str (on Python 2 they are the same type),
    # and the `__html__` duck-check comes before the generic str branch
    # so Markup-like objects (which may subclass text) keep their tag.
    if isinstance(value, tuple):
        return {' t': [_tag(x) for x in value]}
    elif isinstance(value, uuid.UUID):
        return {' u': value.hex}
    elif isinstance(value, bytes):
        return {' b': b64encode(value).decode('ascii')}
    elif callable(getattr(value, '__html__', None)):
        return {' m': text_type(value.__html__())}
    elif isinstance(value, list):
        return [_tag(x) for x in value]
    elif isinstance(value, datetime):
        return {' d': http_date(value)}
    elif isinstance(value, dict):
        # Tag values recursively; keys are assumed to be plain strings.
        return dict((k, _tag(v)) for k, v in iteritems(value))
    elif isinstance(value, str):
        # Python 2 only in practice: reject non-ASCII byte strings with a
        # descriptive error instead of a bare UnicodeError.
        try:
            return text_type(value)
        except UnicodeError:
            from flask.debughelpers import UnexpectedUnicodeError
            raise UnexpectedUnicodeError(u'A byte string with '
                u'non-ASCII data was passed to the session system '
                u'which can only store unicode strings. Consider '
                u'base64 encoding your string (String was %r)' % value)
    return value
class TaggedJSONSerializer(object):
    """A customized JSON serializer that supports a few extra types that
    we take for granted when serializing (tuples, markup objects, datetime).
    """

    #: Maps a tag key (as produced by :func:`_tag`) to the callable that
    #: revives the tagged payload on load.
    _converters = {
        ' t': tuple,
        ' u': uuid.UUID,
        ' b': b64decode,
        ' m': Markup,
        ' d': parse_date,
    }

    def dumps(self, value):
        """Serialize *value* to compact JSON, tagging non-JSON types."""
        return json.dumps(_tag(value), separators=(',', ':'))

    def loads(self, value):
        """Deserialize JSON produced by :meth:`dumps`, reviving tagged values."""
        def object_hook(obj):
            # Only single-key dicts can be tag markers.
            if len(obj) != 1:
                return obj
            key, val = next(iteritems(obj))
            revive = self._converters.get(key)
            return obj if revive is None else revive(val)
        return json.loads(value, object_hook=object_hook)


session_json_serializer = TaggedJSONSerializer()
class SecureCookieSession(CallbackDict, SessionMixin):
    """Base class for sessions based on signed cookies."""

    def __init__(self, initial=None):
        def _mark_modified(instance):
            instance.modified = True
        CallbackDict.__init__(self, initial, _mark_modified)
        # A freshly-opened session starts out unmodified, even when it
        # was populated from `initial`.
        self.modified = False
class NullSession(SecureCookieSession):
    """Class used to generate nicer error messages if sessions are not
    available. Will still allow read-only access to the empty session
    but fail on setting.
    """

    def _fail(self, *args, **kwargs):
        raise RuntimeError('The session is unavailable because no secret '
                           'key was set. Set the secret_key on the '
                           'application to something unique and secret.')

    # Every mutating dict operation is routed to the helpful error above;
    # reads keep working so templates can still inspect the empty session.
    __setitem__ = _fail
    __delitem__ = _fail
    clear = _fail
    pop = _fail
    popitem = _fail
    update = _fail
    setdefault = _fail
    del _fail
class SessionInterface(object):
    """The basic interface you have to implement in order to replace the
    default session interface which uses werkzeug's securecookie
    implementation. The only methods you have to implement are
    :meth:`open_session` and :meth:`save_session`, the others have
    useful defaults which you don't need to change.

    The session object returned by the :meth:`open_session` method has to
    provide a dictionary like interface plus the properties and methods
    from the :class:`SessionMixin`. We recommend just subclassing a dict
    and adding that mixin::

        class Session(dict, SessionMixin):
            pass

    If :meth:`open_session` returns ``None`` Flask will call into
    :meth:`make_null_session` to create a session that acts as replacement
    if the session support cannot work because some requirement is not
    fulfilled. The default :class:`NullSession` class that is created
    will complain that the secret key was not set.

    To replace the session interface on an application all you have to do
    is to assign :attr:`flask.Flask.session_interface`::

        app = Flask(__name__)
        app.session_interface = MySessionInterface()

    .. versionadded:: 0.8
    """

    #: :meth:`make_null_session` will look here for the class that should
    #: be created when a null session is requested. Likewise the
    #: :meth:`is_null_session` method will perform a typecheck against
    #: this type.
    null_session_class = NullSession

    #: A flag that indicates if the session interface is pickle based.
    #: This can be used by flask extensions to make a decision in regards
    #: to how to deal with the session object.
    #:
    #: .. versionadded:: 0.10
    pickle_based = False

    def make_null_session(self, app):
        """Creates a null session which acts as a replacement object if the
        real session support could not be loaded due to a configuration
        error. This mainly aids the user experience because the job of the
        null session is to still support lookup without complaining but
        modifications are answered with a helpful error message of what
        failed.

        This creates an instance of :attr:`null_session_class` by default.
        """
        return self.null_session_class()

    def is_null_session(self, obj):
        """Checks if a given object is a null session. Null sessions are
        not asked to be saved.

        This checks if the object is an instance of :attr:`null_session_class`
        by default.
        """
        return isinstance(obj, self.null_session_class)

    def get_cookie_domain(self, app):
        """Helpful helper method that returns the cookie domain that should
        be used for the session cookie if session cookies are used.
        """
        # Explicit config always wins over anything inferred below.
        if app.config['SESSION_COOKIE_DOMAIN'] is not None:
            return app.config['SESSION_COOKIE_DOMAIN']
        if app.config['SERVER_NAME'] is not None:
            # chop off the port which is usually not supported by browsers
            rv = '.' + app.config['SERVER_NAME'].rsplit(':', 1)[0]

            # Google chrome does not like cookies set to .localhost, so
            # we just go with no domain then. Flask documents anyways that
            # cross domain cookies need a fully qualified domain name
            if rv == '.localhost':
                rv = None

            # If we infer the cookie domain from the server name we need
            # to check if we are in a subpath. In that case we can't
            # set a cross domain cookie.
            if rv is not None:
                path = self.get_cookie_path(app)
                if path != '/':
                    rv = rv.lstrip('.')

            return rv
        # Neither SESSION_COOKIE_DOMAIN nor SERVER_NAME set: implicit None.

    def get_cookie_path(self, app):
        """Returns the path for which the cookie should be valid. The
        default implementation uses the value from the ``SESSION_COOKIE_PATH``
        config var if it's set, and falls back to ``APPLICATION_ROOT`` or
        uses ``/`` if it's ``None``.
        """
        return app.config['SESSION_COOKIE_PATH'] or \
            app.config['APPLICATION_ROOT'] or '/'

    def get_cookie_httponly(self, app):
        """Returns True if the session cookie should be httponly. This
        currently just returns the value of the ``SESSION_COOKIE_HTTPONLY``
        config var.
        """
        return app.config['SESSION_COOKIE_HTTPONLY']

    def get_cookie_secure(self, app):
        """Returns True if the cookie should be secure. This currently
        just returns the value of the ``SESSION_COOKIE_SECURE`` setting.
        """
        return app.config['SESSION_COOKIE_SECURE']

    def get_expiration_time(self, app, session):
        """A helper method that returns an expiration date for the session
        or ``None`` if the session is linked to the browser session. The
        default implementation returns now + the permanent session
        lifetime configured on the application.
        """
        # Non-permanent sessions fall through and return None implicitly.
        if session.permanent:
            return datetime.utcnow() + app.permanent_session_lifetime

    def should_set_cookie(self, app, session):
        """Indicates whether a cookie should be set now or not. This is
        used by session backends to figure out if they should emit a
        set-cookie header or not. The default behavior is controlled by
        the ``SESSION_REFRESH_EACH_REQUEST`` config variable. If
        it's set to ``False`` then a cookie is only set if the session is
        modified, if set to ``True`` it's always set if the session is
        permanent.

        This check is usually skipped if sessions get deleted.

        .. versionadded:: 1.0
        """
        if session.modified:
            return True
        save_each = app.config['SESSION_REFRESH_EACH_REQUEST']
        return save_each and session.permanent

    def open_session(self, app, request):
        """This method has to be implemented and must either return ``None``
        in case the loading failed because of a configuration error or an
        instance of a session object which implements a dictionary like
        interface + the methods and attributes on :class:`SessionMixin`.
        """
        raise NotImplementedError()

    def save_session(self, app, session, response):
        """This is called for actual sessions returned by :meth:`open_session`
        at the end of the request. This is still called during a request
        context so if you absolutely need access to the request you can do
        that.
        """
        raise NotImplementedError()
class SecureCookieSessionInterface(SessionInterface):
    """The default session interface that stores sessions in signed cookies
    through the :mod:`itsdangerous` module.
    """

    #: the salt that should be applied on top of the secret key for the
    #: signing of cookie based sessions.
    salt = 'cookie-session'

    #: the hash function to use for the signature. The default is sha1
    digest_method = staticmethod(hashlib.sha1)

    #: the name of the itsdangerous supported key derivation. The default
    #: is hmac.
    key_derivation = 'hmac'

    #: A python serializer for the payload. The default is a compact
    #: JSON derived serializer with support for some extra Python types
    #: such as datetime objects or tuples.
    serializer = session_json_serializer

    session_class = SecureCookieSession

    def get_signing_serializer(self, app):
        # Without a secret key there is nothing to sign with, so signal
        # "sessions unavailable" to callers with None.
        if not app.secret_key:
            return None
        signer_kwargs = dict(
            key_derivation=self.key_derivation,
            digest_method=self.digest_method
        )
        return URLSafeTimedSerializer(app.secret_key, salt=self.salt,
                                      serializer=self.serializer,
                                      signer_kwargs=signer_kwargs)

    def open_session(self, app, request):
        s = self.get_signing_serializer(app)
        if s is None:
            return None
        val = request.cookies.get(app.session_cookie_name)
        if not val:
            return self.session_class()
        # Signatures older than the permanent session lifetime are rejected.
        max_age = total_seconds(app.permanent_session_lifetime)
        try:
            data = s.loads(val, max_age=max_age)
            return self.session_class(data)
        except BadSignature:
            # Tampered or expired cookie: fall back to a fresh, empty session.
            return self.session_class()

    def save_session(self, app, session, response):
        domain = self.get_cookie_domain(app)
        path = self.get_cookie_path(app)

        # Delete case. If there is no session we bail early.
        # If the session was modified to be empty we remove the
        # whole cookie.
        if not session:
            if session.modified:
                response.delete_cookie(app.session_cookie_name,
                                       domain=domain, path=path)
            return

        # Modification case. There are upsides and downsides to
        # emitting a set-cookie header each request. The behavior
        # is controlled by the :meth:`should_set_cookie` method
        # which performs a quick check to figure out if the cookie
        # should be set or not. This is controlled by the
        # SESSION_REFRESH_EACH_REQUEST config flag as well as
        # the permanent flag on the session itself.
        if not self.should_set_cookie(app, session):
            return

        httponly = self.get_cookie_httponly(app)
        secure = self.get_cookie_secure(app)
        expires = self.get_expiration_time(app, session)
        val = self.get_signing_serializer(app).dumps(dict(session))
        response.set_cookie(app.session_cookie_name, val,
                            expires=expires, httponly=httponly,
                            domain=domain, path=path, secure=secure)
| bsd-3-clause |
sergiohgz/incubator-airflow | airflow/contrib/operators/file_to_gcs.py | 11 | 2717 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class FileToGoogleCloudStorageOperator(BaseOperator):
    """
    Uploads a file to Google Cloud Storage

    :param src: Path to the local file. (templated)
    :type src: string
    :param dst: Destination path within the specified bucket. (templated)
    :type dst: string
    :param bucket: The bucket to upload to. (templated)
    :type bucket: string
    :param google_cloud_storage_conn_id: The Airflow connection ID to upload with
    :type google_cloud_storage_conn_id: string
    :param mime_type: The mime-type string
    :type mime_type: string
    :param delegate_to: The account to impersonate, if any
    :type delegate_to: string
    """
    # Fields rendered through Jinja templating before execution.
    template_fields = ('src', 'dst', 'bucket')

    @apply_defaults
    def __init__(self,
                 src,
                 dst,
                 bucket,
                 google_cloud_storage_conn_id='google_cloud_default',
                 mime_type='application/octet-stream',
                 delegate_to=None,
                 *args,
                 **kwargs):
        super(FileToGoogleCloudStorageOperator, self).__init__(*args, **kwargs)
        self.src = src
        self.dst = dst
        self.bucket = bucket
        self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
        self.mime_type = mime_type
        self.delegate_to = delegate_to

    def execute(self, context):
        """
        Uploads the file to Google cloud storage
        """
        # The hook resolves credentials from the configured connection at
        # task run time, not at DAG parse time.
        hook = GoogleCloudStorageHook(
            google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
            delegate_to=self.delegate_to)

        hook.upload(
            bucket=self.bucket,
            object=self.dst,
            mime_type=self.mime_type,
            filename=self.src)
| apache-2.0 |
ccastell/Transfer-System | Website/env/lib/python3.5/site-packages/setuptools/config.py | 41 | 16088 | from __future__ import absolute_import, unicode_literals
import io
import os
import sys
from collections import defaultdict
from functools import partial
from distutils.errors import DistutilsOptionError, DistutilsFileError
from setuptools.py26compat import import_module
from six import string_types
def read_configuration(
        filepath, find_others=False, ignore_option_errors=False):
    """Read given configuration file and returns options from it as a dict.

    :param str|unicode filepath: Path to configuration file
        to get options from.

    :param bool find_others: Whether to search for other configuration files
        which could be on in various places.

    :param bool ignore_option_errors: Whether to silently ignore
        options, values of which could not be resolved (e.g. due to exceptions
        in directives such as file:, attr:, etc.).
        If False exceptions are propagated as expected.

    :rtype: dict
    """
    from setuptools.dist import Distribution, _Distribution

    filepath = os.path.abspath(filepath)
    if not os.path.isfile(filepath):
        raise DistutilsFileError(
            'Configuration file %s does not exist.' % filepath)

    # Directives such as `file:` resolve paths relative to the config
    # file, so temporarily make its directory the working directory.
    previous_dir = os.getcwd()
    os.chdir(os.path.dirname(filepath))

    try:
        dist = Distribution()
        filenames = []
        if find_others:
            filenames = dist.find_config_files()
        if filepath not in filenames:
            filenames.append(filepath)
        _Distribution.parse_config_files(dist, filenames=filenames)
        handlers = parse_configuration(
            dist, dist.command_options,
            ignore_option_errors=ignore_option_errors)
    finally:
        os.chdir(previous_dir)

    return configuration_to_dict(handlers)
def configuration_to_dict(handlers):
    """Returns configuration data gathered by given handlers as a dict.

    :param list[ConfigHandler] handlers: Handlers list,
        usually from parse_configuration()

    :rtype: dict
    """
    config_dict = defaultdict(dict)

    for handler in handlers:
        for option in handler.set_options:
            target = handler.target_obj
            # Prefer an explicit get_<option>() accessor when one exists.
            getter = getattr(target, 'get_%s' % option, None)
            value = getattr(target, option) if getter is None else getter()
            config_dict[handler.section_prefix][option] = value

    return config_dict
def parse_configuration(
        distribution, command_options, ignore_option_errors=False):
    """Performs additional parsing of configuration options
    for a distribution.

    Returns a list of used option handlers.

    :param Distribution distribution:
    :param dict command_options:
    :param bool ignore_option_errors: Whether to silently ignore
        options, values of which could not be resolved (e.g. due to exceptions
        in directives such as file:, attr:, etc.).
        If False exceptions are propagated as expected.
    :rtype: list
    """
    # [metadata] sections feed the distribution's metadata object ...
    meta = ConfigMetadataHandler(
        distribution.metadata, command_options, ignore_option_errors)
    meta.parse()

    # ... while [options] sections configure the distribution itself.
    options = ConfigOptionsHandler(
        distribution, command_options, ignore_option_errors)
    options.parse()

    return [meta, options]
class ConfigHandler(object):
    """Handles metadata supplied in configuration files."""

    # Prefix for config sections handled by this handler.
    # Must be provided by class heirs.
    section_prefix = None
    """Prefix for config sections handled by this handler.
    Must be provided by class heirs.
    """

    # Options aliases (e.g. legacy option spellings mapped to the real
    # attribute name). For compatibility with various packages, e.g. d2to1
    # and pbr. Note: `-` in keys is replaced with `_` by config parser.
    aliases = {}
    """Options aliases.
    For compatibility with various packages. E.g.: d2to1 and pbr.
    Note: `-` in keys is replaced with `_` by config parser.
    """
def __init__(self, target_obj, options, ignore_option_errors=False):
sections = {}
section_prefix = self.section_prefix
for section_name, section_options in options.items():
if not section_name.startswith(section_prefix):
continue
section_name = section_name.replace(section_prefix, '').strip('.')
sections[section_name] = section_options
self.ignore_option_errors = ignore_option_errors
self.target_obj = target_obj
self.sections = sections
self.set_options = []
    @property
    def parsers(self):
        """Metadata item name to parser function mapping."""
        # The base class parses nothing; concrete handlers must override.
        raise NotImplementedError(
            '%s must provide .parsers property' % self.__class__.__name__)
def __setitem__(self, option_name, value):
unknown = tuple()
target_obj = self.target_obj
# Translate alias into real name.
option_name = self.aliases.get(option_name, option_name)
current_value = getattr(target_obj, option_name, unknown)
if current_value is unknown:
raise KeyError(option_name)
if current_value:
# Already inhabited. Skipping.
return
skip_option = False
parser = self.parsers.get(option_name)
if parser:
try:
value = parser(value)
except Exception:
skip_option = True
if not self.ignore_option_errors:
raise
if skip_option:
return
setter = getattr(target_obj, 'set_%s' % option_name, None)
if setter is None:
setattr(target_obj, option_name, value)
else:
setter(value)
self.set_options.append(option_name)
@classmethod
def _parse_list(cls, value, separator=','):
"""Represents value as a list.
Value is split either by separator (defaults to comma) or by lines.
:param value:
:param separator: List items separator character.
:rtype: list
"""
if isinstance(value, list): # _get_parser_compound case
return value
if '\n' in value:
value = value.splitlines()
else:
value = value.split(separator)
return [chunk.strip() for chunk in value if chunk.strip()]
@classmethod
def _parse_dict(cls, value):
"""Represents value as a dict.
:param value:
:rtype: dict
"""
separator = '='
result = {}
for line in cls._parse_list(value):
key, sep, val = line.partition(separator)
if sep != separator:
raise DistutilsOptionError(
'Unable to parse option value to dict: %s' % value)
result[key.strip()] = val.strip()
return result
@classmethod
def _parse_bool(cls, value):
"""Represents value as boolean.
:param value:
:rtype: bool
"""
value = value.lower()
return value in ('1', 'true', 'yes')
@classmethod
def _parse_file(cls, value):
"""Represents value as a string, allowing including text
from nearest files using `file:` directive.
Directive is sandboxed and won't reach anything outside
directory with setup.py.
Examples:
include: LICENSE
include: src/file.txt
:param str value:
:rtype: str
"""
if not isinstance(value, string_types):
return value
include_directive = 'file:'
if not value.startswith(include_directive):
return value
current_directory = os.getcwd()
filepath = value.replace(include_directive, '').strip()
filepath = os.path.abspath(filepath)
if not filepath.startswith(current_directory):
raise DistutilsOptionError(
'`file:` directive can not access %s' % filepath)
if os.path.isfile(filepath):
with io.open(filepath, encoding='utf-8') as f:
value = f.read()
return value
@classmethod
def _parse_attr(cls, value):
"""Represents value as a module attribute.
Examples:
attr: package.attr
attr: package.module.attr
:param str value:
:rtype: str
"""
attr_directive = 'attr:'
if not value.startswith(attr_directive):
return value
attrs_path = value.replace(attr_directive, '').strip().split('.')
attr_name = attrs_path.pop()
module_name = '.'.join(attrs_path)
module_name = module_name or '__init__'
sys.path.insert(0, os.getcwd())
try:
module = import_module(module_name)
value = getattr(module, attr_name)
finally:
sys.path = sys.path[1:]
return value
@classmethod
def _get_parser_compound(cls, *parse_methods):
"""Returns parser function to represents value as a list.
Parses a value applying given methods one after another.
:param parse_methods:
:rtype: callable
"""
def parse(value):
parsed = value
for method in parse_methods:
parsed = method(parsed)
return parsed
return parse
@classmethod
def _parse_section_to_dict(cls, section_options, values_parser=None):
"""Parses section options into a dictionary.
Optionally applies a given parser to values.
:param dict section_options:
:param callable values_parser:
:rtype: dict
"""
value = {}
values_parser = values_parser or (lambda val: val)
for key, (_, val) in section_options.items():
value[key] = values_parser(val)
return value
def parse_section(self, section_options):
"""Parses configuration file section.
:param dict section_options:
"""
for (name, (_, value)) in section_options.items():
try:
self[name] = value
except KeyError:
pass # Keep silent for a new option may appear anytime.
def parse(self):
"""Parses configuration file items from one
or more related sections.
"""
for section_name, section_options in self.sections.items():
method_postfix = ''
if section_name: # [section.option] variant
method_postfix = '_%s' % section_name
section_parser_method = getattr(
self,
# Dots in section names are tranlsated into dunderscores.
('parse_section%s' % method_postfix).replace('.', '__'),
None)
if section_parser_method is None:
raise DistutilsOptionError(
'Unsupported distribution option section: [%s.%s]' % (
self.section_prefix, section_name))
section_parser_method(section_options)
class ConfigMetadataHandler(ConfigHandler):
    """Handler for the ``[metadata]`` section of ``setup.cfg``."""

    section_prefix = 'metadata'

    aliases = {
        'home_page': 'url',
        'summary': 'description',
        'classifier': 'classifiers',
        'platform': 'platforms',
    }

    strict_mode = False
    """We need to keep it loose, to be partially compatible with
    `pbr` and `d2to1` packages which also uses `metadata` section.

    """

    @property
    def parsers(self):
        """Metadata item name to parser function mapping."""
        parse_list = self._parse_list
        parse_file = self._parse_file

        return {
            'platforms': parse_list,
            'keywords': parse_list,
            'provides': parse_list,
            'requires': parse_list,
            'obsoletes': parse_list,
            'classifiers': self._get_parser_compound(parse_file, parse_list),
            'license': parse_file,
            'description': parse_file,
            'long_description': parse_file,
            'version': self._parse_version,
        }

    def _parse_version(self, value):
        """Parses `version` option value.

        :param value:
        :rtype: str
        """
        version = self._parse_attr(value)

        if callable(version):
            version = version()

        if isinstance(version, string_types):
            return version

        # Allow e.g. a (1, 2, 3) tuple to stand in for '1.2.3'.
        if hasattr(version, '__iter__'):
            return '.'.join(map(str, version))

        return '%s' % version
class ConfigOptionsHandler(ConfigHandler):
    """Handler for the ``[options]`` section of ``setup.cfg``."""

    section_prefix = 'options'

    @property
    def parsers(self):
        """Metadata item name to parser function mapping."""
        parse_list = self._parse_list
        parse_bool = self._parse_bool
        parse_dict = self._parse_dict
        parse_list_semicolon = partial(self._parse_list, separator=';')

        parsers = {
            'packages': self._parse_packages,
            'entry_points': self._parse_file,
            'package_dir': parse_dict,
        }
        for name in ('zip_safe', 'use_2to3', 'include_package_data'):
            parsers[name] = parse_bool
        for name in ('use_2to3_fixers', 'use_2to3_exclude_fixers',
                     'convert_2to3_doctests', 'scripts', 'eager_resources',
                     'dependency_links', 'namespace_packages', 'py_modules'):
            parsers[name] = parse_list
        for name in ('install_requires', 'setup_requires', 'tests_require'):
            parsers[name] = parse_list_semicolon
        return parsers

    def _parse_packages(self, value):
        """Parses `packages` option value.

        :param value:
        :rtype: list
        """
        find_directive = 'find:'
        if not value.startswith(find_directive):
            return self._parse_list(value)

        # Read function arguments from a dedicated section.
        find_kwargs = self.parse_section_packages__find(
            self.sections.get('packages.find', {}))

        from setuptools import find_packages

        return find_packages(**find_kwargs)

    def parse_section_packages__find(self, section_options):
        """Parses `packages.find` configuration file section.

        To be used in conjunction with _parse_packages().

        :param dict section_options:
        """
        section_data = self._parse_section_to_dict(
            section_options, self._parse_list)

        find_kwargs = {}
        for key in ('where', 'include', 'exclude'):
            val = section_data.get(key)
            if val:
                find_kwargs[key] = val

        where = find_kwargs.get('where')
        if where is not None:
            find_kwargs['where'] = where[0]  # cast list to single val

        return find_kwargs

    def parse_section_entry_points(self, section_options):
        """Parses `entry_points` configuration file section.

        :param dict section_options:
        """
        self['entry_points'] = self._parse_section_to_dict(
            section_options, self._parse_list)

    def _parse_package_data(self, section_options):
        """Parse a package-data style section, mapping the ``*`` key to ``''``."""
        parsed = self._parse_section_to_dict(section_options, self._parse_list)

        if '*' in parsed and parsed['*']:
            parsed[''] = parsed.pop('*')

        return parsed

    def parse_section_package_data(self, section_options):
        """Parses `package_data` configuration file section.

        :param dict section_options:
        """
        self['package_data'] = self._parse_package_data(section_options)

    def parse_section_exclude_package_data(self, section_options):
        """Parses `exclude_package_data` configuration file section.

        :param dict section_options:
        """
        self['exclude_package_data'] = self._parse_package_data(
            section_options)

    def parse_section_extras_require(self, section_options):
        """Parses `extras_require` configuration file section.

        :param dict section_options:
        """
        parse_list = partial(self._parse_list, separator=';')
        self['extras_require'] = self._parse_section_to_dict(
            section_options, parse_list)
| apache-2.0 |
alvarolopez/nova | nova/tests/functional/v3/test_personality.py | 27 | 1690 | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.functional.v3 import test_servers
from nova.tests.unit.image import fake
class PersonalitySampleJsonTest(test_servers.ServersSampleBase):
    """API samples for servers created and rebuilt with the personality extension."""

    extension_name = 'os-personality'
    extra_extensions_to_load = ["os-access-ips"]
    _api_version = 'v2'

    def test_servers_post(self):
        self._post_server(use_common_server_api_samples=False)

    def test_servers_rebuild(self):
        rebuild_subs = {
            'image_id': fake.get_valid_image_id(),
            'host': self._get_host(),
            'glance_host': self._get_glance_host(),
            'access_ip_v4': '1.2.3.4',
            'access_ip_v6': '80fe::',
        }
        server_uuid = self._post_server(use_common_server_api_samples=False)
        response = self._do_post(
            'servers/%s/action' % server_uuid,
            'server-action-rebuild-req', rebuild_subs)
        rebuild_subs['hostid'] = '[a-f0-9]+'
        rebuild_subs['id'] = server_uuid
        rebuild_subs.update(self._get_regexes())
        self._verify_response(
            'server-action-rebuild-resp', rebuild_subs, response, 202)
| apache-2.0 |
bmedx/modulestore | xmodule/modulestore/xml.py | 1 | 41091 | import hashlib
import itertools
import json
import logging
import os
import re
import sys
import glob
from collections import defaultdict
from cStringIO import StringIO
from fs.osfs import OSFS
from importlib import import_module
from lxml import etree
from path import Path as path
from contextlib import contextmanager
from lazy import lazy
from xmodule.error_module import ErrorDescriptor
from xmodule.default_module import TemplateDescriptorSystem
from xmodule.util.errortracker import make_error_tracker, exc_info_to_str
from xmodule.x_module import (
XMLParsingSystem, policy_key,
OpaqueKeyReader, AsideKeyGenerator, DEPRECATION_VSCOMPAT_EVENT
)
from xmodule.modulestore.xml_exporter import DEFAULT_CONTENT_FIELDS
from xmodule.modulestore import ModuleStoreEnum, ModuleStoreReadBase, LIBRARY_ROOT, COURSE_ROOT
from opaque_keys.edx.locations import SlashSeparatedCourseKey, Location
from opaque_keys.edx.locator import CourseLocator, LibraryLocator, BlockUsageLocator
from xblock.field_data import DictFieldData
from xblock.runtime import DictKeyValueStore
from xblock.fields import ScopeIds
import platform_core.lib.dogstats_wrapper as dog_stats_api
from .exceptions import ItemNotFoundError
from .inheritance import compute_inherited_metadata, inheriting_field_data, InheritanceKeyValueStore
# Parser shared by all XML loads in this module: DTDs are skipped entirely,
# and comment / whitespace-only nodes are dropped at parse time.
edx_xml_parser = etree.XMLParser(dtd_validation=False, load_dtd=False,
                                 remove_comments=True, remove_blank_text=True)

# Make subsequent etree calls in this process use the parser above by default.
etree.set_default_parser(edx_xml_parser)

log = logging.getLogger(__name__)
class ImportSystem(XMLParsingSystem, TemplateDescriptorSystem):
    """XML-import runtime scoped to a single course directory.

    Parses XBlock XML strings into descriptors, registering each one in the
    owning ``XMLModuleStore`` and munging ``url_name`` attributes so that
    every loaded element ends up with a unique one.
    """

    def __init__(self, xmlstore, course_id, course_dir,
                 error_tracker,
                 load_error_modules=True, target_course_id=None, **kwargs):
        """
        A class that handles loading from xml. Does some munging to ensure that
        all elements have unique slugs.

        xmlstore: the XMLModuleStore to store the loaded modules in
        """
        self.unnamed = defaultdict(int)  # category -> num of new url_names for that category
        self.used_names = defaultdict(set)  # category -> set of used url_names

        # Adding the course_id as passed in for later reference rather than
        # having to recombine the org/course/url_name
        self.course_id = course_id
        self.load_error_modules = load_error_modules
        self.modulestore = xmlstore

        def process_xml(xml):
            """Takes an xml string, and returns a XBlock created from
            that xml.
            """

            def make_name_unique(xml_data):
                """
                Make sure that the url_name of xml_data is unique. If a previously loaded
                unnamed descriptor stole this element's url_name, create a new one.

                Removes 'slug' attribute if present, and adds or overwrites the 'url_name' attribute.
                """
                # VS[compat]. Take this out once course conversion is done (perhaps leave the uniqueness check)

                # tags that really need unique names--they store (or should store) state.
                need_uniq_names = ('problem', 'sequential', 'video', 'course', 'chapter',
                                   'videosequence', 'poll_question', 'vertical')

                attr = xml_data.attrib
                tag = xml_data.tag
                id = lambda x: x
                # Things to try to get a name, in order (key, cleaning function, remove key after reading?)
                lookups = [('url_name', id, False),
                           ('slug', id, True),
                           ('name', Location.clean, False),
                           ('display_name', Location.clean, False)]

                url_name = None
                for key, clean, remove in lookups:
                    if key in attr:
                        url_name = clean(attr[key])
                        if remove:
                            del attr[key]
                        break

                def looks_like_fallback(url_name):
                    """Does this look like something that came from fallback_name()?"""
                    return (url_name is not None
                            and url_name.startswith(tag)
                            and re.search('[0-9a-fA-F]{12}$', url_name))

                def fallback_name(orig_name=None):
                    """Return the fallback name for this module. This is a function instead of a variable
                    because we want it to be lazy."""
                    dog_stats_api.increment(
                        DEPRECATION_VSCOMPAT_EVENT,
                        tags=(
                            "location:import_system_fallback_name",
                            u"name:{}".format(orig_name),
                        )
                    )
                    if looks_like_fallback(orig_name):
                        # We're about to re-hash, in case something changed, so get rid of the tag_ and hash
                        orig_name = orig_name[len(tag) + 1:-12]
                    # append the hash of the content--the first 12 bytes should be plenty.
                    orig_name = "_" + orig_name if orig_name not in (None, "") else ""
                    xml_bytes = xml.encode('utf8')
                    return tag + orig_name + "_" + hashlib.sha1(xml_bytes).hexdigest()[:12]

                # Fallback if there was nothing we could use:
                if url_name is None or url_name == "":
                    url_name = fallback_name()
                    # Don't log a warning--we don't need this in the log. Do
                    # put it in the error tracker--content folks need to see it.

                    if tag in need_uniq_names:
                        error_tracker(u"PROBLEM: no name of any kind specified for {tag}. Student "
                                      u"state will not be properly tracked for this module. Problem xml:"
                                      u" '{xml}...'".format(tag=tag, xml=xml[:100]))
                    else:
                        # TODO (vshnayder): We may want to enable this once course repos are cleaned up.
                        # (or we may want to give up on the requirement for non-state-relevant issues...)
                        # error_tracker("WARNING: no name specified for module. xml='{0}...'".format(xml[:100]))
                        pass

                # Make sure everything is unique
                if url_name in self.used_names[tag]:
                    # Always complain about modules that store state. If it
                    # doesn't store state, don't complain about things that are
                    # hashed.
                    if tag in need_uniq_names:
                        msg = (u"Non-unique url_name in xml. This may break state tracking for content."
                               u" url_name={0}. Content={1}".format(url_name, xml[:100]))
                        error_tracker("PROBLEM: " + msg)
                        log.warning(msg)
                    # Just set name to fallback_name--if there are multiple things with the same fallback name,
                    # they are actually identical, so it's fragile, but not immediately broken.

                    # TODO (vshnayder): if the tag is a pointer tag, this will
                    # break the content because we won't have the right link.
                    # That's also a legitimate attempt to reuse the same content
                    # from multiple places. Once we actually allow that, we'll
                    # need to update this to complain about non-unique names for
                    # definitions, but allow multiple uses.
                    url_name = fallback_name(url_name)

                self.used_names[tag].add(url_name)
                xml_data.set('url_name', url_name)

            try:
                xml_data = etree.fromstring(xml)
                make_name_unique(xml_data)

                descriptor = self.xblock_from_node(
                    xml_data,
                    None,  # parent_id
                    id_manager,
                )
            except Exception as err:  # pylint: disable=broad-except
                if not self.load_error_modules:
                    raise

                # Didn't load properly. Fall back on loading as an error
                # descriptor. This should never error due to formatting.

                msg = "Error loading from xml. %s"
                log.warning(
                    msg,
                    unicode(err)[:200],
                    # Normally, we don't want lots of exception traces in our logs from common
                    # content problems. But if you're debugging the xml loading code itself,
                    # uncomment the next line.
                    # exc_info=True
                )

                msg = msg % (unicode(err)[:200])

                self.error_tracker(msg)
                err_msg = msg + "\n" + exc_info_to_str(sys.exc_info())
                descriptor = ErrorDescriptor.from_xml(
                    xml,
                    self,
                    id_manager,
                    err_msg
                )

            descriptor.data_dir = course_dir

            if descriptor.scope_ids.usage_id in xmlstore.modules[course_id]:
                # keep the parent pointer if any but allow everything else to overwrite
                other_copy = xmlstore.modules[course_id][descriptor.scope_ids.usage_id]
                descriptor.parent = other_copy.parent
                if descriptor != other_copy:
                    log.warning("%s has more than one definition", descriptor.scope_ids.usage_id)
            xmlstore.modules[course_id][descriptor.scope_ids.usage_id] = descriptor

            if descriptor.has_children:
                for child in descriptor.get_children():
                    # parent is alphabetically least
                    if child.parent is None or child.parent > descriptor.scope_ids.usage_id:
                        child.parent = descriptor.location
                        child.save()

            # After setting up the descriptor, save any changes that we have
            # made to attributes on the descriptor to the underlying KeyValueStore.
            descriptor.save()
            return descriptor

        # Templates are never rendered during import; return the empty string.
        render_template = lambda template, context: u''

        # TODO (vshnayder): we are somewhat architecturally confused in the loading code:
        # load_item should actually be get_instance, because it expects the course-specific
        # policy to be loaded. For now, just add the course_id here...
        def load_item(usage_key, for_parent=None):
            """Return the XBlock for the specified location"""
            return xmlstore.get_item(usage_key, for_parent=for_parent)

        resources_fs = OSFS(xmlstore.data_dir / course_dir)

        id_manager = CourseImportLocationManager(course_id, target_course_id)

        super(ImportSystem, self).__init__(
            load_item=load_item,
            resources_fs=resources_fs,
            render_template=render_template,
            error_tracker=error_tracker,
            process_xml=process_xml,
            id_generator=id_manager,
            id_reader=id_manager,
            **kwargs
        )

    # id_generator is ignored, because each ImportSystem is already local to
    # a course, and has it's own id_generator already in place
    def add_node_as_child(self, block, node, id_generator):
        # Parse the node with this system and append the resulting block's
        # usage id to the parent's children list.
        child_block = self.process_xml(etree.tostring(node))
        block.children.append(child_block.scope_ids.usage_id)
class CourseLocationManager(OpaqueKeyReader, AsideKeyGenerator):
    """
    IdGenerator for Location-based definition ids and usage ids
    based within a course
    """
    def __init__(self, course_id):
        super(CourseLocationManager, self).__init__()
        self.course_id = course_id
        # Counter used to mint unique slugs for blocks without one.
        self.autogen_ids = itertools.count(0)

    def create_usage(self, def_id):
        # In this store, definition ids double as usage ids.
        return def_id

    def create_definition(self, block_type, slug=None):
        """Mint a usage key for ``block_type``, autogenerating a slug if none given."""
        assert block_type is not None
        if slug is None:
            slug = 'autogen_{}_{}'.format(block_type, self.autogen_ids.next())
        return self.course_id.make_usage_key(block_type, slug)

    def get_definition_id(self, usage_id):
        """Retrieve the definition that a usage is derived from.

        Args:
            usage_id: The id of the usage to query

        Returns:
            The `definition_id` the usage is derived from
        """
        return usage_id
class CourseImportLocationManager(CourseLocationManager):
    """
    IdGenerator for Location-based definition ids and usage ids
    based within a course, for use during course import.

    In addition to the functionality provided by CourseLocationManager,
    this class also contains the target_course_id for the course import
    process.

    Note: This is a temporary solution to workaround the fact that
    the from_xml method is passed the source course_id instead of the
    target course_id in the import process. For a more ideal solution,
    see https://openedx.atlassian.net/browse/MA-417 as a pending TODO.
    """
    def __init__(self, course_id, target_course_id):
        super(CourseImportLocationManager, self).__init__(course_id=course_id)
        # Destination course id for the import; source id is ``course_id``.
        self.target_course_id = target_course_id
class XMLModuleStore(ModuleStoreReadBase):
"""
An XML backed ModuleStore
"""
parent_xml = COURSE_ROOT
    def __init__(
            self, data_dir, default_class=None, source_dirs=None, course_ids=None,
            load_error_modules=True, i18n_service=None, fs_service=None, user_service=None,
            signal_handler=None, target_course_id=None, **kwargs  # pylint: disable=unused-argument
    ):
        """
        Initialize an XMLModuleStore from data_dir

        Args:
            data_dir (str): path to data directory containing the course directories

            default_class (str): dot-separated string defining the default descriptor
                class to use if none is specified in entry_points

            source_dirs or course_ids (list of str): If specified, the list of source_dirs or course_ids to load.
                Otherwise, load all courses. Note, providing both
        """
        super(XMLModuleStore, self).__init__(**kwargs)

        self.data_dir = path(data_dir)
        self.modules = defaultdict(dict)  # course_id -> dict(location -> XBlock)
        self.courses = {}  # course_dir -> XBlock for the course
        self.errored_courses = {}  # course_dir -> errorlog, for dirs that failed to load

        if course_ids is not None:
            course_ids = [SlashSeparatedCourseKey.from_deprecated_string(course_id) for course_id in course_ids]

        self.load_error_modules = load_error_modules

        if default_class is None:
            self.default_class = None
        else:
            # Resolve the dotted path into an actual class object.
            module_path, _, class_name = default_class.rpartition('.')
            class_ = getattr(import_module(module_path), class_name)
            self.default_class = class_

        # All field data will be stored in an inheriting field data.
        self.field_data = inheriting_field_data(kvs=DictKeyValueStore())

        self.i18n_service = i18n_service
        self.fs_service = fs_service
        self.user_service = user_service

        # If we are specifically asked for missing courses, that should
        # be an error. If we are asked for "all" courses, find the ones
        # that have a course.xml. We sort the dirs in alpha order so we always
        # read things in the same order (OS differences in load order have
        # bitten us in the past.)
        if source_dirs is None:
            source_dirs = sorted([d for d in os.listdir(self.data_dir) if
                                  os.path.exists(self.data_dir / d / self.parent_xml)])
        for course_dir in source_dirs:
            self.try_load_course(course_dir, course_ids, target_course_id)
    def try_load_course(self, course_dir, course_ids=None, target_course_id=None):
        '''
        Load a course, keeping track of errors as we go along. If course_ids is not None,
        then reject the course unless its id is in course_ids.
        '''
        # Special-case code here, since we don't have a location for the
        # course before it loads.
        # So, make a tracker to track load-time errors, then put in the right
        # place after the course loads and we have its location
        errorlog = make_error_tracker()
        course_descriptor = None
        try:
            course_descriptor = self.load_course(course_dir, course_ids, errorlog.tracker, target_course_id)
        except Exception as exc:  # pylint: disable=broad-except
            msg = "ERROR: Failed to load courselike '{0}': {1}".format(
                course_dir.encode("utf-8"), unicode(exc)
            )
            log.exception(msg)
            errorlog.tracker(msg)
            self.errored_courses[course_dir] = errorlog

        # course_descriptor stays None when load_course raised or when the
        # course was filtered out by course_ids.
        if course_descriptor is None:
            pass
        elif isinstance(course_descriptor, ErrorDescriptor):
            # Didn't load course. Instead, save the errors elsewhere.
            self.errored_courses[course_dir] = errorlog
        else:
            self.courses[course_dir] = course_descriptor
            course_descriptor.parent = None
            course_id = self.id_from_descriptor(course_descriptor)
            self._course_errors[course_id] = errorlog
def __unicode__(self):
'''
String representation - for debugging
'''
return '<%s data_dir=%r, %d courselikes, %d modules>' % (
self.__class__.__name__, self.data_dir, len(self.courses), len(self.modules)
)
    @staticmethod
    def id_from_descriptor(descriptor):
        """
        Grab the course ID from the descriptor

        Only the descriptor's ``id`` attribute is read.
        """
        return descriptor.id
def load_policy(self, policy_path, tracker):
"""
Attempt to read a course policy from policy_path. If the file
exists, but is invalid, log an error and return {}.
If the policy loads correctly, returns the deserialized version.
"""
if not os.path.exists(policy_path):
return {}
try:
with open(policy_path) as f:
return json.load(f)
except (IOError, ValueError) as err:
msg = "ERROR: loading courselike policy from {0}".format(policy_path)
tracker(msg)
log.warning(msg + " " + str(err))
return {}
    def load_course(self, course_dir, course_ids, tracker, target_course_id=None):
        """
        Load a course into this module store

        course_path: Course directory name

        course_ids: optional list of course ids; when given, a course whose
            id is not in the list is skipped (None is returned).

        tracker: callable collecting human-readable load-time errors.

        returns a CourseDescriptor for the course
        """
        log.debug('========> Starting courselike import from %s', course_dir)
        with open(self.data_dir / course_dir / self.parent_xml) as course_file:

            # Read the whole file into memory before handing it to lxml.
            course_file = StringIO(course_file.read())

            course_data = etree.parse(course_file, parser=edx_xml_parser).getroot()

            org = course_data.get('org')

            if org is None:
                msg = ("No 'org' attribute set for courselike in {dir}. "
                       "Using default 'edx'".format(dir=course_dir))
                log.warning(msg)
                tracker(msg)
                org = 'edx'

            # Parent XML should be something like 'library.xml' or 'course.xml'
            courselike_label = self.parent_xml.split('.')[0]

            course = course_data.get(courselike_label)

            if course is None:
                msg = (
                    "No '{courselike_label}' attribute set for course in {dir}."
                    " Using default '{default}'".format(
                        courselike_label=courselike_label,
                        dir=course_dir,
                        default=course_dir
                    )
                )
                log.warning(msg)
                tracker(msg)
                course = course_dir

            url_name = course_data.get('url_name', course_data.get('slug'))

            if url_name:
                policy_dir = self.data_dir / course_dir / 'policies' / url_name
                policy_path = policy_dir / 'policy.json'

                policy = self.load_policy(policy_path, tracker)

                # VS[compat]: remove once courses use the policy dirs.
                if policy == {}:
                    dog_stats_api.increment(
                        DEPRECATION_VSCOMPAT_EVENT,
                        tags=(
                            "location:xml_load_course_policy_dir",
                            u"course:{}".format(course),
                        )
                    )
                    old_policy_path = self.data_dir / course_dir / 'policies' / '{0}.json'.format(url_name)
                    policy = self.load_policy(old_policy_path, tracker)
            else:
                policy = {}
                # VS[compat] : 'name' is deprecated, but support it for now...
                if course_data.get('name'):
                    dog_stats_api.increment(
                        DEPRECATION_VSCOMPAT_EVENT,
                        tags=(
                            "location:xml_load_course_course_data_name",
                            u"course:{}".format(course_data.get('course')),
                            u"org:{}".format(course_data.get('org')),
                            u"name:{}".format(course_data.get('name')),
                        )
                    )
                    url_name = Location.clean(course_data.get('name'))
                    tracker("'name' is deprecated for module xml. Please use "
                            "display_name and url_name.")
                else:
                    url_name = None

            course_id = self.get_id(org, course, url_name)

            if course_ids is not None and course_id not in course_ids:
                return None

            def get_policy(usage_id):
                """
                Return the policy dictionary to be applied to the specified XBlock usage
                """
                return policy.get(policy_key(usage_id), {})

            services = {}
            if self.i18n_service:
                services['i18n'] = self.i18n_service

            if self.fs_service:
                services['fs'] = self.fs_service

            if self.user_service:
                services['user'] = self.user_service

            system = ImportSystem(
                xmlstore=self,
                course_id=course_id,
                course_dir=course_dir,
                error_tracker=tracker,
                load_error_modules=self.load_error_modules,
                get_policy=get_policy,
                mixins=self.xblock_mixins,
                default_class=self.default_class,
                select=self.xblock_select,
                field_data=self.field_data,
                services=services,
                target_course_id=target_course_id,
            )
            course_descriptor = system.process_xml(etree.tostring(course_data, encoding='unicode'))
            # If we fail to load the course, then skip the rest of the loading steps
            if isinstance(course_descriptor, ErrorDescriptor):
                return course_descriptor
            self.content_importers(system, course_descriptor, course_dir, url_name)
            log.debug('========> Done with courselike import from %s', course_dir)
            return course_descriptor
def content_importers(self, system, course_descriptor, course_dir, url_name):
"""
Load all extra non-course content, and calculate metadata inheritance.
"""
# NOTE: The descriptors end up loading somewhat bottom up, which
# breaks metadata inheritance via get_children(). Instead
# (actually, in addition to, for now), we do a final inheritance pass
# after we have the course descriptor.
compute_inherited_metadata(course_descriptor)
# now import all pieces of course_info which is expected to be stored
# in <content_dir>/info or <content_dir>/info/<url_name>
self.load_extra_content(
system, course_descriptor, 'course_info',
self.data_dir / course_dir / 'info',
course_dir, url_name
)
# now import all static tabs which are expected to be stored in
# in <content_dir>/tabs or <content_dir>/tabs/<url_name>
self.load_extra_content(
system, course_descriptor, 'static_tab',
self.data_dir / course_dir / 'tabs',
course_dir, url_name
)
self.load_extra_content(
system, course_descriptor, 'custom_tag_template',
self.data_dir / course_dir / 'custom_tags',
course_dir, url_name
)
self.load_extra_content(
system, course_descriptor, 'about',
self.data_dir / course_dir / 'about',
course_dir, url_name
)
@staticmethod
def get_id(org, course, url_name):
"""
Validate and return an ID for a course if given org, course, and url_name.
"""
if not url_name:
raise ValueError("Can't load a course without a 'url_name' "
"(or 'name') set. Set url_name.")
# Have to use SlashSeparatedCourseKey here because it makes sure the same format is
# always used, preventing duplicate keys.
return SlashSeparatedCourseKey(org, course, url_name)
    def load_extra_content(self, system, course_descriptor, category, base_dir, course_dir, url_name):
        """
        Import extra content of ``category`` from ``base_dir``, then again
        from the run-specific folder ``base_dir/url_name`` if it exists
        (the later pass can overwrite content from the first).
        """
        self._load_extra_content(system, course_descriptor, category, base_dir, course_dir)

        # then look in a override folder based on the course run
        if os.path.isdir(base_dir / url_name):
            self._load_extra_content(system, course_descriptor, category, base_dir / url_name, course_dir)
def _import_field_content(self, course_descriptor, category, file_path):
"""
Import field data content for field other than 'data' or 'metadata' form json file and
return field data content as dictionary
"""
slug, location, data_content = None, None, None
try:
# try to read json file
# file_path format: {dirname}.{field_name}.json
dirname, field, file_suffix = file_path.split('/')[-1].split('.')
if file_suffix == 'json' and field not in DEFAULT_CONTENT_FIELDS:
slug = os.path.splitext(os.path.basename(dirname))[0]
location = course_descriptor.scope_ids.usage_id.replace(category=category, name=slug)
with open(file_path) as field_content_file:
field_data = json.load(field_content_file)
data_content = {field: field_data}
except (IOError, ValueError):
# ignore this exception
# only new exported courses which use content fields other than 'metadata' and 'data'
# will have this file '{dirname}.{field_name}.json'
data_content = None
return slug, location, data_content
    def _load_extra_content(self, system, course_descriptor, category, content_path, course_dir):
        """
        Import fields data content from files

        Walks every regular file directly under ``content_path``; ``.json``
        files are treated as per-field content, anything else as raw HTML
        data.  Existing modules are updated in place; otherwise a new XBlock
        is constructed and registered in ``self.modules``.
        """
        for filepath in glob.glob(content_path / '*'):
            if not os.path.isfile(filepath):
                continue

            if filepath.endswith('~'):  # skip *~ files
                continue

            with open(filepath) as f:
                try:
                    if filepath.find('.json') != -1:
                        # json file with json data content
                        slug, loc, data_content = self._import_field_content(course_descriptor, category, filepath)
                        if data_content is None:
                            continue
                        else:
                            try:
                                # get and update data field in xblock runtime
                                module = system.load_item(loc)
                                for key, value in data_content.iteritems():
                                    setattr(module, key, value)
                                module.save()
                            except ItemNotFoundError:
                                # No existing module: remember enough to build one below.
                                module = None
                                data_content['location'] = loc
                                data_content['category'] = category
                    else:
                        slug = os.path.splitext(os.path.basename(filepath))[0]
                        loc = course_descriptor.scope_ids.usage_id.replace(category=category, name=slug)
                        # html file with html data content
                        html = f.read().decode('utf-8')
                        try:
                            module = system.load_item(loc)
                            module.data = html
                            module.save()
                        except ItemNotFoundError:
                            module = None
                            data_content = {'data': html, 'location': loc, 'category': category}

                    if module is None:
                        module = system.construct_xblock(
                            category,
                            # We're loading a descriptor, so student_id is meaningless
                            # We also don't have separate notions of definition and usage ids yet,
                            # so we use the location for both
                            ScopeIds(None, category, loc, loc),
                            DictFieldData(data_content),
                        )
                        module.data_dir = course_dir
                        module.save()

                        self.modules[course_descriptor.id][module.scope_ids.usage_id] = module
                except Exception as exc:  # pylint: disable=broad-except
                    logging.exception("Failed to load %s. Skipping... \
                            Exception: %s", filepath, unicode(exc))
                    system.error_tracker("ERROR: " + unicode(exc))
def has_item(self, usage_key):
"""
Returns True if location exists in this ModuleStore.
"""
return usage_key in self.modules[usage_key.course_key]
def get_item(self, usage_key, depth=0, **kwargs):
"""
Returns an XBlock instance for the item for this UsageKey.
If any segment of the location is None except revision, raises
xmodule.modulestore.exceptions.InsufficientSpecificationError
If no object is found at that location, raises
xmodule.modulestore.exceptions.ItemNotFoundError
usage_key: a UsageKey that matches the module we are looking for.
"""
try:
return self.modules[usage_key.course_key][usage_key]
except KeyError:
raise ItemNotFoundError(usage_key)
def get_items(self, course_id, settings=None, content=None, revision=None, qualifiers=None, **kwargs):
"""
Returns:
list of XModuleDescriptor instances for the matching items within the course with
the given course_id
NOTE: don't use this to look for courses
as the course_id is required. Use get_courses.
Args:
course_id (CourseKey): the course identifier
settings (dict): fields to look for which have settings scope. Follows same syntax
and rules as qualifiers below
content (dict): fields to look for which have content scope. Follows same syntax and
rules as qualifiers below.
qualifiers (dict): what to look for within the course.
Common qualifiers are ``category`` or any field name. if the target field is a list,
then it searches for the given value in the list not list equivalence.
Substring matching pass a regex object.
For this modulestore, ``name`` is another commonly provided key (Location based stores)
(but not revision!)
For this modulestore,
you can search dates by providing either a datetime for == (probably
useless) or a tuple (">"|"<" datetime) for after or before, etc.
"""
if revision == ModuleStoreEnum.RevisionOption.draft_only:
return []
items = []
qualifiers = qualifiers.copy() if qualifiers else {} # copy the qualifiers (destructively manipulated here)
category = qualifiers.pop('category', None)
name = qualifiers.pop('name', None)
def _block_matches_all(mod_loc, module):
if category and mod_loc.category != category:
return False
if name:
if isinstance(name, list):
# Support for passing a list as the name qualifier
if mod_loc.name not in name:
return False
elif mod_loc.name != name:
return False
return all(
self._block_matches(module, fields or {})
for fields in [settings, content, qualifiers]
)
for mod_loc, module in self.modules[course_id].iteritems():
if _block_matches_all(mod_loc, module):
items.append(module)
return items
def make_course_key(self, org, course, run):
"""
Return a valid :class:`~opaque_keys.edx.locator.CourseLocator` for this modulestore
that matches the supplied `org`, `course`, and `run`.
This key may represent a course that doesn't exist in this modulestore.
"""
return CourseLocator(org, course, run, deprecated=True)
def make_course_usage_key(self, course_key):
"""
Return a valid :class:`~opaque_keys.edx.keys.UsageKey` for this modulestore
that matches the supplied course_key.
"""
return BlockUsageLocator(course_key, 'course', course_key.run)
def get_courses(self, **kwargs):
"""
Returns a list of course descriptors. If there were errors on loading,
some of these may be ErrorDescriptors instead.
"""
return self.courses.values()
def get_course_summaries(self, **kwargs):
"""
Returns `self.get_courses()`. Use to list courses to the global staff user.
"""
return self.get_courses(**kwargs)
def get_errored_courses(self):
"""
Return a dictionary of course_dir -> [(msg, exception_str)], for each
course_dir where course loading failed.
"""
return dict((k, self.errored_courses[k].errors) for k in self.errored_courses)
def get_orphans(self, course_key, **kwargs):
"""
Get all of the xblocks in the given course which have no parents and are not of types which are
usually orphaned. NOTE: may include xblocks which still have references via xblocks which don't
use children to point to their dependents.
"""
# here just to quell the abstractmethod. someone could write the impl if needed
raise NotImplementedError
def get_parent_location(self, location, **kwargs):
'''Find the location that is the parent of this location in this
course. Needed for path_to_location().
'''
block = self.get_item(location, 0)
return block.parent
def get_modulestore_type(self, course_key=None):
"""
Returns an enumeration-like type reflecting the type of this modulestore, per ModuleStoreEnum.Type
Args:
course_key: just for signature compatibility
"""
# return ModuleStoreEnum.Type.xml
return None
def get_courses_for_wiki(self, wiki_slug, **kwargs):
"""
Return the list of courses which use this wiki_slug
:param wiki_slug: the course wiki root slug
:return: list of course locations
"""
courses = self.get_courses()
return [course.location.course_key for course in courses if course.wiki_slug == wiki_slug]
def heartbeat(self):
"""
Ensure that every known course is loaded and ready to go. Really, just return b/c
if this gets called the __init__ finished which means the courses are loaded.
Returns the course count
"""
return {'xml': True}
@contextmanager
def branch_setting(self, branch_setting, course_id=None): # pylint: disable=unused-argument
"""
A context manager for temporarily setting the branch value for the store to the given branch_setting.
"""
if branch_setting != ModuleStoreEnum.Branch.published_only:
raise ValueError(u"Cannot set branch setting to {} on a ReadOnly store".format(branch_setting))
yield
    def _find_course_asset(self, asset_key):
        """
        For now this is not implemented, but others should feel free to implement using the asset.json
        which export produces.

        Always returns (None, None) after logging a warning.
        """
        log.warning("_find_course_asset request of XML modulestore - not implemented.")
        return (None, None)
    def find_asset_metadata(self, asset_key, **kwargs):
        """
        For now this is not implemented, but others should feel free to implement using the asset.json
        which export produces.

        Always returns None after logging a warning.
        """
        log.warning("find_asset_metadata request of XML modulestore - not implemented.")
        return None
    def get_all_asset_metadata(self, course_key, asset_type, start=0, maxresults=-1, sort=None, **kwargs):
        """
        For now this is not implemented, but others should feel free to implement using the asset.json
        which export produces.

        Always returns an empty list after logging a warning; the paging
        arguments (start, maxresults, sort) are accepted but ignored.
        """
        log.warning("get_all_asset_metadata request of XML modulestore - not implemented.")
        return []
def fill_in_run(self, course_key):
"""
A no-op.
Added to simplify tests which use the XML-store directly.
"""
return course_key
class LibraryXMLModuleStore(XMLModuleStore):
    """
    An XMLModuleStore specialised for importing content Libraries from XML.
    """
    parent_xml = LIBRARY_ROOT

    @staticmethod
    def get_id(org, library, url_name):
        """
        Build a LibraryLocator from `org` and `library`.  `url_name` is
        ignored; it exists only for compatibility with the parent signature.
        """
        return LibraryLocator(org=org, library=library)

    @staticmethod
    def patch_descriptor_kvs(library_descriptor):
        """
        Replace the descriptor's field data with an inheriting
        InheritanceKeyValueStore seeded from its current field values.

        Metadata inheritance can be done purely through XBlocks, but the
        import phase assumes a root block backed by an
        InheritanceKeyValueStore; as XBlocks don't carry this KVS, it is
        patched in manually here.
        """
        seed = {name: getattr(library_descriptor, name)
                for name in library_descriptor.fields.keys()}
        # Invalidate any cached '_unwrapped_field_data' so it is rebuilt
        # on its next access.
        lazy.invalidate(library_descriptor, '_unwrapped_field_data')
        # pylint: disable=protected-access
        library_descriptor._field_data = inheriting_field_data(InheritanceKeyValueStore(seed))

    def content_importers(self, system, course_descriptor, course_dir, url_name):
        """
        Apply metadata inheritance to the freshly imported library tree.
        """
        self.patch_descriptor_kvs(course_descriptor)
        compute_inherited_metadata(course_descriptor)

    def get_library(self, library_id, depth=0, **kwargs):  # pylint: disable=unused-argument
        """
        Return the library matching `library_id`, or None if absent.
        """
        assert isinstance(library_id, LibraryLocator)
        return next(
            (lib for lib in self.get_courses(**kwargs)
             if lib.location.library_key == library_id),
            None,
        )

    @staticmethod
    def id_from_descriptor(descriptor):
        """
        Extract the Library Key from a Library descriptor.
        """
        return descriptor.location.library_key

    def get_orphans(self, course_key, **kwargs):
        """
        Would return the parentless xblocks of the course; not implemented
        for the XML library store.
        """
        # Present only to satisfy the abstract interface; implement if needed.
        raise NotImplementedError
| apache-2.0 |
johnkeepmoving/oss-ftp | python27/win32/Lib/site-packages/requests/packages/chardet/jpcntx.py | 1777 | 19348 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .compat import wrap_ord
NUM_OF_CATEGORY = 6          # number of frequency categories tracked in _mRelSample
DONT_KNOW = -1               # returned by get_confidence() when too little data was seen
ENOUGH_REL_THRESHOLD = 100   # sequences needed before got_enough_data() reports True
MAX_REL_THRESHOLD = 1000     # stop sampling (set _mDone) after this many sequences
MINIMUM_DATA_THRESHOLD = 4   # minimum sequences before a real confidence is computed
# This is hiragana 2-char sequence table, the number in each cell represents its frequency category
jp2CharContext = (
(0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1),
(2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4),
(0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2),
(0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4),
(1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4),
(0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3),
(0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3),
(0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3),
(0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4),
(0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3),
(2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4),
(0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3),
(0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5),
(0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3),
(2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5),
(0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4),
(1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4),
(0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3),
(0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3),
(0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3),
(0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5),
(0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4),
(0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5),
(0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3),
(0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4),
(0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4),
(0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4),
(0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1),
(0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0),
(1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3),
(0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0),
(0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3),
(0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3),
(0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5),
(0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4),
(2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5),
(0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3),
(0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3),
(0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3),
(0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3),
(0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4),
(0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4),
(0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2),
(0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3),
(0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3),
(0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3),
(0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3),
(0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4),
(0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3),
(0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4),
(0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3),
(0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3),
(0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4),
(0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4),
(0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3),
(2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4),
(0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4),
(0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3),
(0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4),
(0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4),
(1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4),
(0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3),
(0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2),
(0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2),
(0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3),
(0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3),
(0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5),
(0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3),
(0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4),
(1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4),
(0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1),
(0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2),
(0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3),
(0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1),
)
class JapaneseContextAnalysis:
    """
    Scores 2-character hiragana sequences (via the jp2CharContext table) to
    help distinguish Japanese encodings.  Subclasses provide get_order() to
    map raw bytes to a hiragana index for their particular encoding.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        self._mTotalRel = 0  # total sequence received
        # category counters, each integer counts sequence in its category
        self._mRelSample = [0] * NUM_OF_CATEGORY
        # if last byte in current buffer is not the last byte of a character,
        # we need to know how many bytes to skip in next buffer
        self._mNeedToSkipCharNum = 0
        self._mLastCharOrder = -1  # The order of previous char
        # If this flag is set to True, detection is done and conclusion has
        # been made
        self._mDone = False

    def feed(self, aBuf, aLen):
        """Accumulate 2-char context statistics from aLen bytes of aBuf."""
        if self._mDone:
            return

        # The buffer we got is byte oriented, and a character may span more
        # than one buffer.  In case the last one or two bytes of the last
        # buffer were not complete, we recorded how many bytes are needed to
        # complete that character and skip them here.  Skipping one partial
        # character simplifies the logic with negligible accuracy cost.
        i = self._mNeedToSkipCharNum
        # NOTE(review): if a previous feed ended exactly on a character
        # boundary, _mNeedToSkipCharNum keeps its stale value and is applied
        # to this buffer too -- looks like a latent upstream quirk; confirm
        # against upstream chardet before changing.
        while i < aLen:
            order, charLen = self.get_order(aBuf[i:i + 2])
            i += charLen
            if i > aLen:
                # Character straddles the buffer end: remember the overshoot.
                self._mNeedToSkipCharNum = i - aLen
                self._mLastCharOrder = -1
            else:
                if (order != -1) and (self._mLastCharOrder != -1):
                    self._mTotalRel += 1
                    if self._mTotalRel > MAX_REL_THRESHOLD:
                        self._mDone = True
                        break
                    self._mRelSample[jp2CharContext[self._mLastCharOrder][order]] += 1
                self._mLastCharOrder = order

    def got_enough_data(self):
        return self._mTotalRel > ENOUGH_REL_THRESHOLD

    def get_confidence(self):
        """
        Return the fraction of sampled sequences outside category 0, or
        DONT_KNOW when fewer than MINIMUM_DATA_THRESHOLD sequences were seen.
        """
        if self._mTotalRel > MINIMUM_DATA_THRESHOLD:
            # float() guards against Python 2 integer division, which would
            # truncate the confidence to 0 (or 1) since both operands are ints.
            return float(self._mTotalRel - self._mRelSample[0]) / self._mTotalRel
        else:
            return DONT_KNOW

    def get_order(self, aBuf):
        # Encoding-specific; overridden by subclasses.
        return -1, 1
class SJISContextAnalysis(JapaneseContextAnalysis):
    def __init__(self):
        # Initialize the base statistics (counters, _mDone flag); the
        # original skipped this, leaving the analyser with no state until an
        # external reset() call.  (Later upstream chardet also calls the
        # base initializer here.)
        JapaneseContextAnalysis.__init__(self)
        self.charset_name = "SHIFT_JIS"

    def get_charset_name(self):
        return self.charset_name

    def get_order(self, aBuf):
        """Return (hiragana order, byte length) for the char starting aBuf."""
        if not aBuf:
            return -1, 1
        # find out current char's byte length
        first_char = wrap_ord(aBuf[0])
        if ((0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC)):
            charLen = 2
            if (first_char == 0x87) or (0xFA <= first_char <= 0xFC):
                # These lead bytes only occur in the CP932 extension rows.
                self.charset_name = "CP932"
        else:
            charLen = 1

        # return its order if it is hiragana
        if len(aBuf) > 1:
            second_char = wrap_ord(aBuf[1])
            # NOTE(review): 202 (0xCA) as the lead byte matches upstream
            # chardet, but documented Shift_JIS hiragana uses lead byte
            # 0x82 -- verify against upstream before changing.
            if (first_char == 202) and (0x9F <= second_char <= 0xF1):
                return second_char - 0x9F, charLen

        return -1, charLen
class EUCJPContextAnalysis(JapaneseContextAnalysis):
    """Context analyser specialised for EUC-JP byte sequences."""

    def get_order(self, aBuf):
        """
        Return (hiragana order, char byte length) for the character at the
        start of aBuf; the order is -1 for anything that is not hiragana.
        """
        if not aBuf:
            return -1, 1
        # Determine how many bytes the current character occupies.
        lead = wrap_ord(aBuf[0])
        if lead == 0x8E or 0xA1 <= lead <= 0xFE:
            width = 2
        elif lead == 0x8F:
            width = 3
        else:
            width = 1

        # Hiragana occupies 0xA4A1 - 0xA4F3; map it to a zero-based order.
        if len(aBuf) > 1:
            trail = wrap_ord(aBuf[1])
            if lead == 0xA4 and 0xA1 <= trail <= 0xF3:
                return trail - 0xA1, width

        return -1, width
# flake8: noqa
| mit |
2014c2g5/2015cda_g3_0421 | static/Brython3.1.1-20150328-091302/Lib/multiprocessing/pool.py | 694 | 23263 | #
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = ['Pool']
#
# Imports
#
import threading
import queue
import itertools
import collections
import time
from multiprocessing import Process, cpu_count, TimeoutError
from multiprocessing.util import Finalize, debug
#
# Constants representing the state of a pool
#
# Lifecycle states stored in Pool._state and in each handler thread's
# _state attribute.
RUN = 0
CLOSE = 1
TERMINATE = 2
#
# Miscellaneous
#
# Global counter used to hand out unique job ids.
job_counter = itertools.count()
def mapstar(args):
    """Unpack (func, iterable, ...) and run the built-in map over it,
    materialised as a list."""
    return [value for value in map(*args)]
def starmapstar(args):
    """Unpack (func, sequence_of_argument_tuples) and apply func to each
    tuple, returning the results as a list."""
    func, argument_tuples = args[0], args[1]
    return [func(*arg_tuple) for arg_tuple in argument_tuples]
#
# Code run by worker processes
#
class MaybeEncodingError(Exception):
    """Wraps possible unpickleable errors, so they can be
    safely sent through the socket."""

    def __init__(self, exc, value):
        # Store reprs so the wrapper itself is always picklable.
        self.exc, self.value = repr(exc), repr(value)
        super(MaybeEncodingError, self).__init__(self.exc, self.value)

    def __str__(self):
        template = "Error sending result: '%s'. Reason: '%s'"
        return template % (self.value, self.exc)

    def __repr__(self):
        return "<MaybeEncodingError: %s>" % str(self)
def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None):
    """Main loop of a pool process: pull (job, i, func, args, kwds) tuples
    from `inqueue`, run them, and push (job, i, (success, value)) onto
    `outqueue`.  Stops on a None sentinel, EOF/IOError, or after
    `maxtasks` completed tasks."""
    assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
    put = outqueue.put
    get = inqueue.get
    # Pipe-backed queues: close the ends this process does not use.
    if hasattr(inqueue, '_writer'):
        inqueue._writer.close()
        outqueue._reader.close()

    if initializer is not None:
        initializer(*initargs)

    finished = 0
    while maxtasks is None or (maxtasks and finished < maxtasks):
        try:
            task = get()
        except (EOFError, IOError):
            debug('worker got EOFError or IOError -- exiting')
            break

        if task is None:
            debug('worker got sentinel -- exiting')
            break

        job, i, func, args, kwds = task
        try:
            outcome = (True, func(*args, **kwds))
        except Exception as e:
            outcome = (False, e)
        try:
            put((job, i, outcome))
        except Exception as e:
            # The result could not be pickled; send a picklable wrapper back
            # so the waiting caller still gets an answer.
            wrapped = MaybeEncodingError(e, outcome[1])
            debug("Possible encoding error while sending result: %s" % (
                wrapped))
            put((job, i, (False, wrapped)))
        finished += 1
    debug('worker exiting after %d tasks' % finished)
#
# Class representing a process pool
#
class Pool(object):
'''
Class which supports an async version of applying functions to arguments.
'''
Process = Process
    def __init__(self, processes=None, initializer=None, initargs=(),
                 maxtasksperchild=None):
        """
        Create a process pool.

        processes: number of worker processes (defaults to cpu_count(), or 1
            when that is unavailable).
        initializer/initargs: optional callable run in each worker at start.
        maxtasksperchild: recycle a worker after this many tasks (None means
            workers live as long as the pool).
        """
        # Shared infrastructure: task/result pipes plus a cache mapping
        # job id -> result object.
        self._setup_queues()
        self._taskqueue = queue.Queue()
        self._cache = {}
        self._state = RUN
        self._maxtasksperchild = maxtasksperchild
        self._initializer = initializer
        self._initargs = initargs

        if processes is None:
            try:
                processes = cpu_count()
            except NotImplementedError:
                # Platform cannot report a CPU count; fall back to one worker.
                processes = 1
        if processes < 1:
            raise ValueError("Number of processes must be at least 1")

        if initializer is not None and not callable(initializer):
            raise TypeError('initializer must be a callable')

        self._processes = processes
        self._pool = []
        self._repopulate_pool()

        # Daemon thread driving Pool._handle_workers (defined elsewhere in
        # this module).
        self._worker_handler = threading.Thread(
            target=Pool._handle_workers,
            args=(self, )
            )
        self._worker_handler.daemon = True
        self._worker_handler._state = RUN
        self._worker_handler.start()

        # Daemon thread driving Pool._handle_tasks, fed from _taskqueue.
        self._task_handler = threading.Thread(
            target=Pool._handle_tasks,
            args=(self._taskqueue, self._quick_put, self._outqueue, self._pool)
            )
        self._task_handler.daemon = True
        self._task_handler._state = RUN
        self._task_handler.start()

        # Daemon thread driving Pool._handle_results, reading the out-pipe.
        self._result_handler = threading.Thread(
            target=Pool._handle_results,
            args=(self._outqueue, self._quick_get, self._cache)
            )
        self._result_handler.daemon = True
        self._result_handler._state = RUN
        self._result_handler.start()

        # Registered finalizer tears the pool down at GC/interpreter exit.
        self._terminate = Finalize(
            self, self._terminate_pool,
            args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
                  self._worker_handler, self._task_handler,
                  self._result_handler, self._cache),
            exitpriority=15
            )
def _join_exited_workers(self):
"""Cleanup after any worker processes which have exited due to reaching
their specified lifetime. Returns True if any workers were cleaned up.
"""
cleaned = False
for i in reversed(range(len(self._pool))):
worker = self._pool[i]
if worker.exitcode is not None:
# worker exited
debug('cleaning up worker %d' % i)
worker.join()
cleaned = True
del self._pool[i]
return cleaned
def _repopulate_pool(self):
"""Bring the number of pool processes up to the specified number,
for use after reaping workers which have exited.
"""
for i in range(self._processes - len(self._pool)):
w = self.Process(target=worker,
args=(self._inqueue, self._outqueue,
self._initializer,
self._initargs, self._maxtasksperchild)
)
self._pool.append(w)
w.name = w.name.replace('Process', 'PoolWorker')
w.daemon = True
w.start()
debug('added worker')
def _maintain_pool(self):
"""Clean up any exited workers and start replacements for them.
"""
if self._join_exited_workers():
self._repopulate_pool()
def _setup_queues(self):
from .queues import SimpleQueue
self._inqueue = SimpleQueue()
self._outqueue = SimpleQueue()
self._quick_put = self._inqueue._writer.send
self._quick_get = self._outqueue._reader.recv
def apply(self, func, args=(), kwds={}):
'''
Equivalent of `func(*args, **kwds)`.
'''
assert self._state == RUN
return self.apply_async(func, args, kwds).get()
def map(self, func, iterable, chunksize=None):
'''
Apply `func` to each element in `iterable`, collecting the results
in a list that is returned.
'''
return self._map_async(func, iterable, mapstar, chunksize).get()
def starmap(self, func, iterable, chunksize=None):
    '''
    Like `map()` method but the elements of the `iterable` are expected to
    be iterables as well and will be unpacked as arguments. Hence
    `func` and (a, b) becomes func(a, b).
    '''
    pending = self._map_async(func, iterable, starmapstar, chunksize)
    return pending.get()
def starmap_async(self, func, iterable, chunksize=None, callback=None,
                  error_callback=None):
    '''
    Asynchronous version of `starmap()` method: returns a result object
    immediately instead of blocking on .get().
    '''
    return self._map_async(
        func, iterable, starmapstar, chunksize, callback, error_callback)
def imap(self, func, iterable, chunksize=1):
    '''
    Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.

    Returns a lazy iterator that yields results in submission order.
    With chunksize > 1 the iterable is dispatched in batches, which is
    far more efficient for long iterables with a cheap `func`.
    '''
    if self._state != RUN:
        raise ValueError("Pool not running")
    # Validate explicitly instead of `assert`: asserts vanish under -O
    # and an invalid chunksize should surface as ValueError.
    if chunksize < 1:
        raise ValueError(
            "Chunksize must be 1+, not {0:n}".format(chunksize))
    if chunksize == 1:
        result = IMapIterator(self._cache)
        self._taskqueue.put((((result._job, i, func, (x,), {})
                              for i, x in enumerate(iterable)),
                             result._set_length))
        return result
    else:
        task_batches = Pool._get_tasks(func, iterable, chunksize)
        result = IMapIterator(self._cache)
        self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                              for i, x in enumerate(task_batches)),
                             result._set_length))
        # Flatten the per-chunk result lists back into single items.
        return (item for chunk in result for item in chunk)
def imap_unordered(self, func, iterable, chunksize=1):
    '''
    Like `imap()` method but ordering of results is arbitrary.
    '''
    if self._state != RUN:
        raise ValueError("Pool not running")
    # Validate explicitly instead of `assert` (see imap()): asserts are
    # stripped under -O and bad input should raise ValueError.
    if chunksize < 1:
        raise ValueError(
            "Chunksize must be 1+, not {0:n}".format(chunksize))
    if chunksize == 1:
        result = IMapUnorderedIterator(self._cache)
        self._taskqueue.put((((result._job, i, func, (x,), {})
                              for i, x in enumerate(iterable)),
                             result._set_length))
        return result
    else:
        task_batches = Pool._get_tasks(func, iterable, chunksize)
        result = IMapUnorderedIterator(self._cache)
        self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                              for i, x in enumerate(task_batches)),
                             result._set_length))
        # Flatten the per-chunk result lists back into single items.
        return (item for chunk in result for item in chunk)
def apply_async(self, func, args=(), kwds={}, callback=None,
                error_callback=None):
    '''
    Asynchronous version of `apply()` method: returns an ApplyResult
    immediately instead of blocking on the call.
    '''
    if self._state != RUN:
        raise ValueError("Pool not running")
    pending = ApplyResult(self._cache, callback, error_callback)
    task = (pending._job, None, func, args, kwds)
    self._taskqueue.put(([task], None))
    return pending
def map_async(self, func, iterable, chunksize=None, callback=None,
              error_callback=None):
    '''
    Asynchronous version of `map()` method.
    '''
    return self._map_async(
        func, iterable, mapstar, chunksize, callback, error_callback)
def _map_async(self, func, iterable, mapper, chunksize=None, callback=None,
               error_callback=None):
    '''
    Helper function to implement map, starmap and their async counterparts.
    '''
    if self._state != RUN:
        raise ValueError("Pool not running")
    if not hasattr(iterable, '__len__'):
        iterable = list(iterable)       # len() is needed below, so materialize
    if chunksize is None:
        # Heuristic: aim for roughly 4 chunks per worker so scheduling
        # stays balanced without excessive per-task overhead.
        chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
        if extra:
            chunksize += 1
    if len(iterable) == 0:
        chunksize = 0                   # MapResult treats 0 as "already done"
    task_batches = Pool._get_tasks(func, iterable, chunksize)
    result = MapResult(self._cache, chunksize, len(iterable), callback,
                       error_callback=error_callback)
    self._taskqueue.put((((result._job, i, mapper, (x,), {})
                          for i, x in enumerate(task_batches)), None))
    return result
@staticmethod
def _handle_workers(pool):
    """Background thread: keep the worker pool at full strength.

    Runs while the pool is in the RUN state and, after a plain close(),
    until the result cache drains; a TERMINATE state stops it at once.
    """
    thread = threading.current_thread()
    # Keep maintaining workers until the cache gets drained, unless the pool
    # is terminated.
    while thread._state == RUN or (pool._cache and thread._state != TERMINATE):
        pool._maintain_pool()
        time.sleep(0.1)
    # send sentinel to stop workers
    pool._taskqueue.put(None)
    debug('worker handler exiting')
@staticmethod
def _handle_tasks(taskqueue, put, outqueue, pool):
    """Background thread: feed queued task batches to the workers.

    Pulls ``(taskseq, set_length)`` pairs off ``taskqueue`` until the
    ``None`` sentinel arrives, then propagates sentinels to the result
    handler and to every worker.
    """
    thread = threading.current_thread()
    for taskseq, set_length in iter(taskqueue.get, None):
        i = -1                      # stays -1 when taskseq is empty
        for i, task in enumerate(taskseq):
            if thread._state:
                debug('task handler found thread._state != RUN')
                break               # pool is shutting down: stop feeding
            try:
                put(task)
            except IOError:
                debug('could not put task on queue')
                break               # the workers' queue is gone
        else:
            # Whole batch was sent: report its length to imap-style results.
            if set_length:
                debug('doing set_length()')
                set_length(i+1)
            continue
        break                       # inner loop broke -> stop entirely
    else:
        debug('task handler got sentinel')
    try:
        # tell result handler to finish when cache is empty
        debug('task handler sending sentinel to result handler')
        outqueue.put(None)
        # tell workers there is no more work
        debug('task handler sending sentinel to workers')
        for p in pool:
            put(None)
    except IOError:
        debug('task handler got IOError when sending sentinels')
    debug('task handler exiting')
@staticmethod
def _handle_results(outqueue, get, cache):
    """Background thread: deliver worker results to their result objects.

    Phase 1 runs until a sentinel arrives or the thread is TERMINATEd;
    phase 2 keeps draining while results are still outstanding; finally
    the outqueue is emptied so the terminating process cannot block on
    a full pipe.
    """
    thread = threading.current_thread()
    while 1:
        try:
            task = get()
        except (IOError, EOFError):
            debug('result handler got EOFError/IOError -- exiting')
            return
        if thread._state:
            assert thread._state == TERMINATE
            debug('result handler found thread._state=TERMINATE')
            break
        if task is None:
            debug('result handler got sentinel')
            break
        job, i, obj = task
        try:
            cache[job]._set(i, obj)
        except KeyError:
            pass        # result arrived after its job was discarded
    # Drain whatever is still owed to live result objects.
    while cache and thread._state != TERMINATE:
        try:
            task = get()
        except (IOError, EOFError):
            debug('result handler got EOFError/IOError -- exiting')
            return
        if task is None:
            debug('result handler ignoring extra sentinel')
            continue
        job, i, obj = task
        try:
            cache[job]._set(i, obj)
        except KeyError:
            pass
    if hasattr(outqueue, '_reader'):
        debug('ensuring that outqueue is not full')
        # If we don't make room available in outqueue then
        # attempts to add the sentinel (None) to outqueue may
        # block. There is guaranteed to be no more than 2 sentinels.
        try:
            for i in range(10):
                if not outqueue._reader.poll():
                    break
                get()
        except (IOError, EOFError):
            pass
    debug('result handler exiting: len(cache)=%s, thread._state=%s',
          len(cache), thread._state)
@staticmethod
def _get_tasks(func, it, size):
    """Yield ``(func, batch)`` pairs, each batch holding up to *size* items."""
    stream = iter(it)
    while True:
        batch = tuple(itertools.islice(stream, size))
        if not batch:
            return
        yield (func, batch)
def __reduce__(self):
    """Pools hold OS resources; refuse any attempt to pickle one."""
    msg = 'pool objects cannot be passed between processes or pickled'
    raise NotImplementedError(msg)
def close(self):
    """Stop accepting new tasks; workers drain pending work then exit.

    A no-op unless the pool is currently in the RUN state.
    """
    debug('closing pool')
    if self._state != RUN:
        return
    self._state = CLOSE
    self._worker_handler._state = CLOSE
def terminate(self):
    """Stop the pool immediately without waiting for outstanding work.

    Flips the pool and worker-handler state to TERMINATE, then runs the
    Finalize object registered in __init__.
    """
    debug('terminating pool')
    self._state = TERMINATE
    self._worker_handler._state = TERMINATE
    self._terminate()
def join(self):
    """Wait for every helper thread and worker process to finish.

    Only valid after close() or terminate() has been called.
    """
    debug('joining pool')
    assert self._state in (CLOSE, TERMINATE)
    self._worker_handler.join()
    self._task_handler.join()
    self._result_handler.join()
    for p in self._pool:
        p.join()
@staticmethod
def _help_stuff_finish(inqueue, task_handler, size):
    """Drain the input queue so a blocked task handler can finish."""
    # task_handler may be blocked trying to put items on inqueue
    debug('removing tasks from inqueue until task handler finished')
    inqueue._rlock.acquire()        # keep workers from consuming meanwhile
    while task_handler.is_alive() and inqueue._reader.poll():
        inqueue._reader.recv()      # discard the pending task
        time.sleep(0)               # yield so the task handler can progress
@classmethod
def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
                    worker_handler, task_handler, result_handler, cache):
    """Finalizer: shut down all helper threads and worker processes.

    Registered via ``Finalize`` in __init__; the shutdown order below
    (handlers first, then workers, then joins) is deliberate.
    """
    # this is guaranteed to only be called once
    debug('finalizing pool')
    worker_handler._state = TERMINATE
    task_handler._state = TERMINATE
    debug('helping task handler/workers to finish')
    cls._help_stuff_finish(inqueue, task_handler, len(pool))
    assert result_handler.is_alive() or len(cache) == 0
    result_handler._state = TERMINATE
    outqueue.put(None)                  # sentinel
    # We must wait for the worker handler to exit before terminating
    # workers because we don't want workers to be restarted behind our back.
    debug('joining worker handler')
    if threading.current_thread() is not worker_handler:
        worker_handler.join()
    # Terminate workers which haven't already finished.
    if pool and hasattr(pool[0], 'terminate'):
        debug('terminating workers')
        for p in pool:
            if p.exitcode is None:
                p.terminate()
    debug('joining task handler')
    if threading.current_thread() is not task_handler:
        task_handler.join()
    debug('joining result handler')
    if threading.current_thread() is not result_handler:
        result_handler.join()
    if pool and hasattr(pool[0], 'terminate'):
        debug('joining pool workers')
        for p in pool:
            if p.is_alive():
                # worker has not yet exited
                debug('cleaning up worker %d' % p.pid)
                p.join()
def __enter__(self):
    """Support ``with Pool(...) as p:`` -- hands back the pool itself."""
    return self
def __exit__(self, exc_type, exc_val, exc_tb):
    """Leaving the ``with`` block terminates (not closes) the pool."""
    self.terminate()
#
# Class whose instances are returned by `Pool.apply_async()`
#
class ApplyResult(object):
    """Handle returned by Pool.apply_async(): lets the caller wait for,
    and then fetch, the outcome of a single submitted task."""

    def __init__(self, cache, callback, error_callback):
        self._event = threading.Event()
        self._job = next(job_counter)
        self._cache = cache
        self._callback = callback
        self._error_callback = error_callback
        cache[self._job] = self

    def ready(self):
        """True once the worker has produced a value or an exception."""
        return self._event.is_set()

    def successful(self):
        """True when the call finished without raising (requires ready())."""
        assert self.ready()
        return self._success

    def wait(self, timeout=None):
        """Block until the result arrives or *timeout* seconds elapse."""
        self._event.wait(timeout)

    def get(self, timeout=None):
        """Return the call's value, re-raising any worker-side exception."""
        self.wait(timeout)
        if not self.ready():
            raise TimeoutError
        if self._success:
            return self._value
        raise self._value

    def _set(self, i, obj):
        # obj is the (success, value) pair produced by the worker.
        self._success, self._value = obj
        if self._success and self._callback:
            self._callback(self._value)
        if not self._success and self._error_callback:
            self._error_callback(self._value)
        self._event.set()
        del self._cache[self._job]
AsyncResult = ApplyResult # create alias -- see #17805
#
# Class whose instances are returned by `Pool.map_async()`
#
class MapResult(ApplyResult):
    """Result object for Pool.map_async(): aggregates per-chunk results
    into a single ordered list."""

    def __init__(self, cache, chunksize, length, callback, error_callback):
        ApplyResult.__init__(self, cache, callback,
                             error_callback=error_callback)
        self._success = True
        self._value = [None] * length       # filled in chunk by chunk
        self._chunksize = chunksize
        if chunksize <= 0:
            # Empty input: nothing will ever arrive, so finish immediately.
            self._number_left = 0
            self._event.set()
            del cache[self._job]
        else:
            self._number_left = length//chunksize + bool(length % chunksize)

    def _set(self, i, success_result):
        # Called by the result-handler thread with one chunk's outcome.
        success, result = success_result
        if success:
            self._value[i*self._chunksize:(i+1)*self._chunksize] = result
            self._number_left -= 1
            if self._number_left == 0:
                if self._callback:
                    self._callback(self._value)
                # Drop from the cache before waking any waiters.
                del self._cache[self._job]
                self._event.set()
        else:
            # The first failing chunk poisons the whole map.
            self._success = False
            self._value = result
            if self._error_callback:
                self._error_callback(self._value)
            del self._cache[self._job]
            self._event.set()
#
# Class whose instances are returned by `Pool.imap()`
#
class IMapIterator(object):
    """Ordered lazy iterator returned by Pool.imap().

    Results can arrive out of order from the workers; they are buffered
    in ``_unsorted`` until all their predecessors have been delivered.
    """

    def __init__(self, cache):
        self._cond = threading.Condition(threading.Lock())
        self._job = next(job_counter)
        self._cache = cache
        self._items = collections.deque()   # results ready to be yielded
        self._index = 0                     # index of the next expected result
        self._length = None                 # total count; unknown until set
        self._unsorted = {}                 # out-of-order results, keyed by index
        cache[self._job] = self

    def __iter__(self):
        return self

    def next(self, timeout=None):
        """Return the next in-order result, blocking up to *timeout* seconds.

        Raises StopIteration when exhausted, TimeoutError on timeout.
        """
        self._cond.acquire()
        try:
            try:
                item = self._items.popleft()
            except IndexError:
                if self._index == self._length:
                    raise StopIteration
                self._cond.wait(timeout)
                # Retry once after waking; a still-empty queue means timeout.
                try:
                    item = self._items.popleft()
                except IndexError:
                    if self._index == self._length:
                        raise StopIteration
                    raise TimeoutError
        finally:
            self._cond.release()
        success, value = item
        if success:
            return value
        raise value

    __next__ = next                    # XXX

    def _set(self, i, obj):
        # Called by the result handler: deliver result number *i*.
        self._cond.acquire()
        try:
            if self._index == i:
                self._items.append(obj)
                self._index += 1
                # Flush buffered successors that are now in order.
                while self._index in self._unsorted:
                    obj = self._unsorted.pop(self._index)
                    self._items.append(obj)
                    self._index += 1
                self._cond.notify()
            else:
                self._unsorted[i] = obj
            if self._index == self._length:
                del self._cache[self._job]
        finally:
            self._cond.release()

    def _set_length(self, length):
        # Called once the task feeder knows how many results to expect.
        self._cond.acquire()
        try:
            self._length = length
            if self._index == self._length:
                self._cond.notify()
                del self._cache[self._job]
        finally:
            self._cond.release()
#
# Class whose instances are returned by `Pool.imap_unordered()`
#
class IMapUnorderedIterator(IMapIterator):
    """Variant of IMapIterator used by imap_unordered(): results are
    handed out in completion order, so no reordering buffer is needed."""

    def _set(self, i, obj):
        # The index argument is ignored for ordering; _index only counts
        # how many results have been delivered so far.
        with self._cond:
            self._items.append(obj)
            self._index += 1
            self._cond.notify()
            if self._index == self._length:
                del self._cache[self._job]
#
#
#
class ThreadPool(Pool):
    """Pool variant whose workers are threads rather than processes."""

    from .dummy import Process      # thread-backed Process substitute

    def __init__(self, processes=None, initializer=None, initargs=()):
        Pool.__init__(self, processes, initializer, initargs)

    def _setup_queues(self):
        # Plain thread-safe queues: no pipes or pickling needed between
        # threads, so the "quick" helpers are just put/get.
        self._inqueue = queue.Queue()
        self._outqueue = queue.Queue()
        self._quick_put = self._inqueue.put
        self._quick_get = self._outqueue.get

    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # put sentinels at head of inqueue to make workers finish
        inqueue.not_empty.acquire()
        try:
            inqueue.queue.clear()
            inqueue.queue.extend([None] * size)
            inqueue.not_empty.notify_all()
        finally:
            inqueue.not_empty.release()
| gpl-3.0 |
massot/odoo | addons/account/account_cash_statement.py | 283 | 15868 | # encoding: utf-8
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 PC Solutions (<http://pcsol.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools import float_compare
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class account_cashbox_line(osv.osv):
    """ Cash Box Details

    One line per coin/bill denomination (``pieces``) holding the counted
    number of units at opening and at closing of a cash statement.
    """
    _name = 'account.cashbox.line'
    _description = 'CashBox Line'
    _rec_name = 'pieces'

    def _sub_total(self, cr, uid, ids, name, arg, context=None):
        """ Calculates Sub total (pieces * counted units, opening and closing).
        @param name: Names of fields.
        @param arg: User defined arguments
        @return: Dictionary of values.
        """
        res = {}
        for obj in self.browse(cr, uid, ids, context=context):
            res[obj.id] = {
                'subtotal_opening' : obj.pieces * obj.number_opening,
                'subtotal_closing' : obj.pieces * obj.number_closing,
            }
        return res

    def on_change_sub_opening(self, cr, uid, ids, pieces, number, *a):
        """ Compute the subtotal for the opening """
        return {'value' : {'subtotal_opening' : (pieces * number) or 0.0 }}

    def on_change_sub_closing(self, cr, uid, ids, pieces, number, *a):
        """ Compute the subtotal for the closing """
        return {'value' : {'subtotal_closing' : (pieces * number) or 0.0 }}

    _columns = {
        'pieces': fields.float('Unit of Currency', digits_compute=dp.get_precision('Account')),
        'number_opening' : fields.integer('Number of Units', help='Opening Unit Numbers'),
        'number_closing' : fields.integer('Number of Units', help='Closing Unit Numbers'),
        'subtotal_opening': fields.function(_sub_total, string='Opening Subtotal', type='float', digits_compute=dp.get_precision('Account'), multi='subtotal'),
        'subtotal_closing': fields.function(_sub_total, string='Closing Subtotal', type='float', digits_compute=dp.get_precision('Account'), multi='subtotal'),
        'bank_statement_id' : fields.many2one('account.bank.statement', ondelete='cascade'),
    }
class account_cash_statement(osv.osv):
    """Bank statement extension implementing cash-register control:
    opening/closing cashbox counts and profit/loss difference posting."""
    _inherit = 'account.bank.statement'

    def _update_balances(self, cr, uid, ids, context=None):
        """
        Set starting and ending balances according to pieces count
        """
        res = {}
        for statement in self.browse(cr, uid, ids, context=context):
            if (statement.journal_id.type not in ('cash',)):
                continue
            if not statement.journal_id.cash_control:
                prec = self.pool['decimal.precision'].precision_get(cr, uid, 'Account')
                if float_compare(statement.balance_end_real, statement.balance_end, precision_digits=prec):
                    statement.write({'balance_end_real' : statement.balance_end})
                continue
            start = end = 0
            for line in statement.details_ids:
                start += line.subtotal_opening
                end += line.subtotal_closing
            data = {
                'balance_start': start,
                'balance_end_real': end,
            }
            res[statement.id] = data
            # Call super().write directly so this model's own write()
            # override is not re-entered recursively.
            super(account_cash_statement, self).write(cr, uid, [statement.id], data, context=context)
        return res

    def _get_sum_entry_encoding(self, cr, uid, ids, name, arg, context=None):
        """ Compute the total of the statement's transaction lines.
        @param name: Names of fields.
        @param arg: User defined arguments
        @return: Dictionary of values.
        """
        res = {}
        for statement in self.browse(cr, uid, ids, context=context):
            res[statement.id] = sum((line.amount for line in statement.line_ids), 0.0)
        return res

    def _get_company(self, cr, uid, context=None):
        # Default company: the user's company, else the first one found.
        user_pool = self.pool.get('res.users')
        company_pool = self.pool.get('res.company')
        user = user_pool.browse(cr, uid, uid, context=context)
        company_id = user.company_id
        if not company_id:
            company_id = company_pool.search(cr, uid, [])
        return company_id and company_id[0] or False

    def _get_statement_from_line(self, cr, uid, ids, context=None):
        # Store-trigger helper: map modified statement lines to their
        # parent statements so the function field gets recomputed.
        result = {}
        for line in self.pool.get('account.bank.statement.line').browse(cr, uid, ids, context=context):
            result[line.statement_id.id] = True
        return result.keys()

    def _compute_difference(self, cr, uid, ids, fieldnames, args, context=None):
        # Difference = real (counted) closing balance - theoretical one.
        result = dict.fromkeys(ids, 0.0)
        for obj in self.browse(cr, uid, ids, context=context):
            result[obj.id] = obj.balance_end_real - obj.balance_end
        return result

    def _compute_last_closing_balance(self, cr, uid, ids, fieldnames, args, context=None):
        # For draft statements, fetch the closing balance of the latest
        # confirmed statement on the same journal (0.0 when none exists).
        result = dict.fromkeys(ids, 0.0)
        for obj in self.browse(cr, uid, ids, context=context):
            if obj.state == 'draft':
                statement_ids = self.search(cr, uid,
                    [('journal_id', '=', obj.journal_id.id),('state', '=', 'confirm')],
                    order='create_date desc',
                    limit=1,
                    context=context
                )
                if not statement_ids:
                    continue
                else:
                    st = self.browse(cr, uid, statement_ids[0], context=context)
                    result[obj.id] = st.balance_end_real
        return result

    def onchange_journal_id(self, cr, uid, ids, journal_id, context=None):
        result = super(account_cash_statement, self).onchange_journal_id(cr, uid, ids, journal_id)
        if not journal_id:
            return result
        statement_ids = self.search(cr, uid,
                [('journal_id', '=', journal_id),('state', '=', 'confirm')],
                order='create_date desc',
                limit=1,
                context=context
            )
        opening_details_ids = self._get_cash_open_box_lines(cr, uid, journal_id, context)
        if opening_details_ids:
            # NOTE(review): assumes super() always returns a dict with a
            # 'value' key; would raise KeyError otherwise -- confirm with
            # the base onchange implementation.
            result['value']['opening_details_ids'] = opening_details_ids
        if not statement_ids:
            return result
        st = self.browse(cr, uid, statement_ids[0], context=context)
        result.setdefault('value', {}).update({'last_closing_balance' : st.balance_end_real})
        return result

    _columns = {
        'total_entry_encoding': fields.function(_get_sum_entry_encoding, string="Total Transactions",
            store = {
                'account.bank.statement': (lambda self, cr, uid, ids, context=None: ids, ['line_ids','move_line_ids'], 10),
                'account.bank.statement.line': (_get_statement_from_line, ['amount'], 10),
            },
            help="Total of cash transaction lines."),
        'closing_date': fields.datetime("Closed On"),
        'details_ids' : fields.one2many('account.cashbox.line', 'bank_statement_id', string='CashBox Lines', copy=True),
        'opening_details_ids' : fields.one2many('account.cashbox.line', 'bank_statement_id', string='Opening Cashbox Lines'),
        'closing_details_ids' : fields.one2many('account.cashbox.line', 'bank_statement_id', string='Closing Cashbox Lines'),
        'user_id': fields.many2one('res.users', 'Responsible', required=False),
        'difference' : fields.function(_compute_difference, method=True, string="Difference", type="float", help="Difference between the theoretical closing balance and the real closing balance."),
        'last_closing_balance' : fields.function(_compute_last_closing_balance, method=True, string='Last Closing Balance', type='float'),
    }
    _defaults = {
        'state': 'draft',
        # NOTE(review): the mutable default ``context={}`` is shared across
        # calls; harmless here because the lambda only reads from it.
        'date': lambda self, cr, uid, context={}: context.get('date', time.strftime("%Y-%m-%d %H:%M:%S")),
        'user_id': lambda self, cr, uid, context=None: uid,
    }

    def _get_cash_open_box_lines(self, cr, uid, journal_id, context):
        # Build one2many commands pre-filling the opening cashbox lines
        # from the journal's configured denominations; when configured,
        # seed opening counts from the last confirmed statement's closing.
        details_ids = []
        if not journal_id:
            return details_ids
        journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
        if journal and (journal.type == 'cash'):
            last_pieces = None
            if journal.with_last_closing_balance == True:
                domain = [('journal_id', '=', journal.id),
                          ('state', '=', 'confirm')]
                last_bank_statement_ids = self.search(cr, uid, domain, limit=1, order='create_date desc', context=context)
                if last_bank_statement_ids:
                    last_bank_statement = self.browse(cr, uid, last_bank_statement_ids[0], context=context)
                    last_pieces = dict(
                        (line.pieces, line.number_closing) for line in last_bank_statement.details_ids
                    )
            for value in journal.cashbox_line_ids:
                nested_values = {
                    'number_closing' : 0,
                    'number_opening' : last_pieces.get(value.pieces, 0) if isinstance(last_pieces, dict) else 0,
                    'pieces' : value.pieces
                }
                details_ids.append([0, False, nested_values])
        return details_ids

    def create(self, cr, uid, vals, context=None):
        # Pre-fill opening cashbox lines when none were supplied.
        journal_id = vals.get('journal_id')
        if journal_id and not vals.get('opening_details_ids'):
            vals['opening_details_ids'] = vals.get('opening_details_ids') or self._get_cash_open_box_lines(cr, uid, journal_id, context)
        res_id = super(account_cash_statement, self).create(cr, uid, vals, context=context)
        self._update_balances(cr, uid, [res_id], context)
        return res_id

    def write(self, cr, uid, ids, vals, context=None):
        """
        Update the records given by {ids} with the new values in {vals}.
        @param cr: cursor to database
        @param uid: id of current user
        @param ids: list of record ids to be updated
        @param vals: dict of new values to be set
        @param context: context arguments, like lang, time zone
        @return: True on success, False otherwise
        """
        if vals.get('journal_id', False):
            # The journal changed: its old cashbox lines no longer apply.
            cashbox_line_obj = self.pool.get('account.cashbox.line')
            cashbox_ids = cashbox_line_obj.search(cr, uid, [('bank_statement_id', 'in', ids)], context=context)
            cashbox_line_obj.unlink(cr, uid, cashbox_ids, context)
        res = super(account_cash_statement, self).write(cr, uid, ids, vals, context=context)
        self._update_balances(cr, uid, ids, context)
        return res

    def _user_allow(self, cr, uid, statement_id, context=None):
        # Access-control hook for button_open(); always allowed by default.
        return True

    def button_open(self, cr, uid, ids, context=None):
        """ Changes statement state to Running and assigns the statement
        number from the journal's (or the default) sequence.
        @return: True
        """
        obj_seq = self.pool.get('ir.sequence')
        if context is None:
            context = {}
        statement_pool = self.pool.get('account.bank.statement')
        for statement in statement_pool.browse(cr, uid, ids, context=context):
            vals = {}
            if not self._user_allow(cr, uid, statement.id, context=context):
                raise osv.except_osv(_('Error!'), (_('You do not have rights to open this %s journal!') % (statement.journal_id.name, )))
            if statement.name and statement.name == '/':
                c = {'fiscalyear_id': statement.period_id.fiscalyear_id.id}
                if statement.journal_id.sequence_id:
                    st_number = obj_seq.next_by_id(cr, uid, statement.journal_id.sequence_id.id, context=c)
                else:
                    st_number = obj_seq.next_by_code(cr, uid, 'account.cash.statement', context=c)
                vals.update({
                    'name': st_number
                })
            vals.update({
                'state': 'open',
            })
            self.write(cr, uid, [statement.id], vals, context=context)
        return True

    def statement_close(self, cr, uid, ids, journal_type='bank', context=None):
        # Cash statements record the closing timestamp; bank statements
        # keep the inherited behaviour.
        if journal_type == 'bank':
            return super(account_cash_statement, self).statement_close(cr, uid, ids, journal_type, context)
        vals = {
            'state':'confirm',
            'closing_date': time.strftime("%Y-%m-%d %H:%M:%S")
        }
        return self.write(cr, uid, ids, vals, context=context)

    def check_status_condition(self, cr, uid, state, journal_type='bank'):
        # Cash statements are confirmable from the 'open' state.
        if journal_type == 'bank':
            return super(account_cash_statement, self).check_status_condition(cr, uid, state, journal_type)
        return state=='open'

    def button_confirm_cash(self, cr, uid, ids, context=None):
        # Post the counted-vs-theoretical difference (if any) as an extra
        # statement line against the journal's profit/loss account, then
        # confirm like a regular bank statement.
        absl_proxy = self.pool.get('account.bank.statement.line')
        TABLES = ((_('Profit'), 'profit_account_id'), (_('Loss'), 'loss_account_id'),)
        for obj in self.browse(cr, uid, ids, context=context):
            if obj.difference == 0.0:
                continue
            elif obj.difference < 0.0:
                account = obj.journal_id.loss_account_id
                name = _('Loss')
                if not obj.journal_id.loss_account_id:
                    raise osv.except_osv(_('Error!'), _('There is no Loss Account on the journal %s.') % (obj.journal_id.name,))
            else: # obj.difference > 0.0
                account = obj.journal_id.profit_account_id
                name = _('Profit')
                if not obj.journal_id.profit_account_id:
                    raise osv.except_osv(_('Error!'), _('There is no Profit Account on the journal %s.') % (obj.journal_id.name,))
            values = {
                'statement_id' : obj.id,
                'journal_id' : obj.journal_id.id,
                'account_id' : account.id,
                'amount' : obj.difference,
                'name' : name,
            }
            absl_proxy.create(cr, uid, values, context=context)
        return super(account_cash_statement, self).button_confirm_bank(cr, uid, ids, context=context)
class account_journal(osv.osv):
    """Journal extension: configurable cashbox denominations."""
    _inherit = 'account.journal'

    def _default_cashbox_line_ids(self, cr, uid, context=None):
        # Return a list of coins in Euros.
        result = [
            dict(pieces=value) for value in [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500]
        ]
        return result

    _columns = {
        'cashbox_line_ids' : fields.one2many('account.journal.cashbox.line', 'journal_id', 'CashBox', copy=True),
    }
    _defaults = {
        'cashbox_line_ids' : _default_cashbox_line_ids,
    }
class account_journal_cashbox_line(osv.osv):
    """One coin/bill denomination configured on a journal's cashbox."""
    _name = 'account.journal.cashbox.line'
    _rec_name = 'pieces'
    _columns = {
        'pieces': fields.float('Values', digits_compute=dp.get_precision('Account')),
        'journal_id' : fields.many2one('account.journal', 'Journal', required=True, select=1, ondelete="cascade"),
    }
    _order = 'pieces asc'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
oliverlee/sympy | sympy/simplify/tests/test_radsimp.py | 54 | 15672 | from sympy import (
sqrt, Derivative, symbols, collect, Function, factor, Wild, S,
collect_const, log, fraction, I, cos, Add, O,sin, rcollect,
Mul, radsimp, diff, root, Symbol, Rational, exp)
from sympy.core.mul import _unevaluated_Mul as umul
from sympy.simplify.radsimp import _unevaluated_Add, collect_sqrt, fraction_expand
from sympy.utilities.pytest import XFAIL
from sympy.abc import x, y, z, t, a, b, c, d, e, f, g, h, i, k
def test_radsimp():
    """Regression tests for radsimp(): denominator rationalization over
    nested surds, power rules, recursion into functions, and the
    ``symbolic=False`` / ``max_terms`` options."""
    r2 = sqrt(2)
    r3 = sqrt(3)
    r5 = sqrt(5)
    r7 = sqrt(7)
    assert fraction(radsimp(1/r2)) == (sqrt(2), 2)
    assert radsimp(1/(1 + r2)) == \
        -1 + sqrt(2)
    assert radsimp(1/(r2 + r3)) == \
        -sqrt(2) + sqrt(3)
    assert fraction(radsimp(1/(1 + r2 + r3))) == \
        (-sqrt(6) + sqrt(2) + 2, 4)
    assert fraction(radsimp(1/(r2 + r3 + r5))) == \
        (-sqrt(30) + 2*sqrt(3) + 3*sqrt(2), 12)
    assert fraction(radsimp(1/(1 + r2 + r3 + r5))) == (
        (-34*sqrt(10) - 26*sqrt(15) - 55*sqrt(3) - 61*sqrt(2) + 14*sqrt(30) +
        93 + 46*sqrt(6) + 53*sqrt(5), 71))
    assert fraction(radsimp(1/(r2 + r3 + r5 + r7))) == (
        (-50*sqrt(42) - 133*sqrt(5) - 34*sqrt(70) - 145*sqrt(3) + 22*sqrt(105)
        + 185*sqrt(2) + 62*sqrt(30) + 135*sqrt(7), 215))
    z = radsimp(1/(1 + r2/3 + r3/5 + r5 + r7))
    assert len((3616791619821680643598*z).args) == 16
    assert radsimp(1/z) == 1/z
    assert radsimp(1/z, max_terms=20).expand() == 1 + r2/3 + r3/5 + r5 + r7
    assert radsimp(1/(r2*3)) == \
        sqrt(2)/6
    assert radsimp(1/(r2*a + r3 + r5 + r7)) == (
        (8*sqrt(2)*a**7 - 8*sqrt(7)*a**6 - 8*sqrt(5)*a**6 - 8*sqrt(3)*a**6 -
        180*sqrt(2)*a**5 + 8*sqrt(30)*a**5 + 8*sqrt(42)*a**5 + 8*sqrt(70)*a**5
        - 24*sqrt(105)*a**4 + 84*sqrt(3)*a**4 + 100*sqrt(5)*a**4 +
        116*sqrt(7)*a**4 - 72*sqrt(70)*a**3 - 40*sqrt(42)*a**3 -
        8*sqrt(30)*a**3 + 782*sqrt(2)*a**3 - 462*sqrt(3)*a**2 -
        302*sqrt(7)*a**2 - 254*sqrt(5)*a**2 + 120*sqrt(105)*a**2 -
        795*sqrt(2)*a - 62*sqrt(30)*a + 82*sqrt(42)*a + 98*sqrt(70)*a -
        118*sqrt(105) + 59*sqrt(7) + 295*sqrt(5) + 531*sqrt(3))/(16*a**8 -
        480*a**6 + 3128*a**4 - 6360*a**2 + 3481))
    assert radsimp(1/(r2*a + r2*b + r3 + r7)) == (
        (sqrt(2)*a*(a + b)**2 - 5*sqrt(2)*a + sqrt(42)*a + sqrt(2)*b*(a +
        b)**2 - 5*sqrt(2)*b + sqrt(42)*b - sqrt(7)*(a + b)**2 - sqrt(3)*(a +
        b)**2 - 2*sqrt(3) + 2*sqrt(7))/(2*a**4 + 8*a**3*b + 12*a**2*b**2 -
        20*a**2 + 8*a*b**3 - 40*a*b + 2*b**4 - 20*b**2 + 8))
    assert radsimp(1/(r2*a + r2*b + r2*c + r2*d)) == \
        sqrt(2)/(2*a + 2*b + 2*c + 2*d)
    assert radsimp(1/(1 + r2*a + r2*b + r2*c + r2*d)) == (
        (sqrt(2)*a + sqrt(2)*b + sqrt(2)*c + sqrt(2)*d - 1)/(2*a**2 + 4*a*b +
        4*a*c + 4*a*d + 2*b**2 + 4*b*c + 4*b*d + 2*c**2 + 4*c*d + 2*d**2 - 1))
    assert radsimp((y**2 - x)/(y - sqrt(x))) == \
        sqrt(x) + y
    assert radsimp(-(y**2 - x)/(y - sqrt(x))) == \
        -(sqrt(x) + y)
    assert radsimp(1/(1 - I + a*I)) == \
        (-I*a + 1 + I)/(a**2 - 2*a + 2)
    assert radsimp(1/((-x + y)*(x - sqrt(y)))) == \
        (-x - sqrt(y))/((x - y)*(x**2 - y))
    e = (3 + 3*sqrt(2))*x*(3*x - 3*sqrt(y))
    assert radsimp(e) == x*(3 + 3*sqrt(2))*(3*x - 3*sqrt(y))
    assert radsimp(1/e) == (
        (-9*x + 9*sqrt(2)*x - 9*sqrt(y) + 9*sqrt(2)*sqrt(y))/(9*x*(9*x**2 -
        9*y)))
    assert radsimp(1 + 1/(1 + sqrt(3))) == \
        Mul(S.Half, -1 + sqrt(3), evaluate=False) + 1
    # noncommutative factors must not be rationalized away
    A = symbols("A", commutative=False)
    assert radsimp(x**2 + sqrt(2)*x**2 - sqrt(2)*x*A) == \
        x**2 + sqrt(2)*x**2 - sqrt(2)*x*A
    assert radsimp(1/sqrt(5 + 2 * sqrt(6))) == -sqrt(2) + sqrt(3)
    assert radsimp(1/sqrt(5 + 2 * sqrt(6))**3) == -(-sqrt(3) + sqrt(2))**3
    # issue 6532
    assert fraction(radsimp(1/sqrt(x))) == (sqrt(x), x)
    assert fraction(radsimp(1/sqrt(2*x + 3))) == (sqrt(2*x + 3), 2*x + 3)
    assert fraction(radsimp(1/sqrt(2*(x + 3)))) == (sqrt(2*x + 6), 2*x + 6)
    # issue 5994
    e = S('-(2 + 2*sqrt(2) + 4*2**(1/4))/'
        '(1 + 2**(3/4) + 3*2**(1/4) + 3*sqrt(2))')
    assert radsimp(e).expand() == -2*2**(S(3)/4) - 2*2**(S(1)/4) + 2 + 2*sqrt(2)
    # issue 5986 (modifications to radimp didn't initially recognize this so
    # the test is included here)
    assert radsimp(1/(-sqrt(5)/2 - S(1)/2 + (-sqrt(5)/2 - S(1)/2)**2)) == 1
    # from issue 5934
    eq = (
        (-240*sqrt(2)*sqrt(sqrt(5) + 5)*sqrt(8*sqrt(5) + 40) -
        360*sqrt(2)*sqrt(-8*sqrt(5) + 40)*sqrt(-sqrt(5) + 5) -
        120*sqrt(10)*sqrt(-8*sqrt(5) + 40)*sqrt(-sqrt(5) + 5) +
        120*sqrt(2)*sqrt(-sqrt(5) + 5)*sqrt(8*sqrt(5) + 40) +
        120*sqrt(2)*sqrt(-8*sqrt(5) + 40)*sqrt(sqrt(5) + 5) +
        120*sqrt(10)*sqrt(-sqrt(5) + 5)*sqrt(8*sqrt(5) + 40) +
        120*sqrt(10)*sqrt(-8*sqrt(5) + 40)*sqrt(sqrt(5) + 5))/(-36000 -
        7200*sqrt(5) + (12*sqrt(10)*sqrt(sqrt(5) + 5) +
        24*sqrt(10)*sqrt(-sqrt(5) + 5))**2))
    assert radsimp(eq) is S.NaN  # it's 0/0
    # work with normal form
    e = 1/sqrt(sqrt(7)/7 + 2*sqrt(2) + 3*sqrt(3) + 5*sqrt(5)) + 3
    assert radsimp(e) == (
        -sqrt(sqrt(7) + 14*sqrt(2) + 21*sqrt(3) +
        35*sqrt(5))*(-11654899*sqrt(35) - 1577436*sqrt(210) - 1278438*sqrt(15)
        - 1346996*sqrt(10) + 1635060*sqrt(6) + 5709765 + 7539830*sqrt(14) +
        8291415*sqrt(21))/1300423175 + 3)
    # obey power rules
    base = sqrt(3) - sqrt(2)
    assert radsimp(1/base**3) == (sqrt(3) + sqrt(2))**3
    assert radsimp(1/(-base)**3) == -(sqrt(2) + sqrt(3))**3
    assert radsimp(1/(-base)**x) == (-base)**(-x)
    assert radsimp(1/base**x) == (sqrt(2) + sqrt(3))**x
    assert radsimp(root(1/(-1 - sqrt(2)), -x)) == (-1)**(-1/x)*(1 + sqrt(2))**(1/x)
    # recurse
    e = cos(1/(1 + sqrt(2)))
    assert radsimp(e) == cos(-sqrt(2) + 1)
    assert radsimp(e/2) == cos(-sqrt(2) + 1)/2
    assert radsimp(1/e) == 1/cos(-sqrt(2) + 1)
    assert radsimp(2/e) == 2/cos(-sqrt(2) + 1)
    assert fraction(radsimp(e/sqrt(x))) == (sqrt(x)*cos(-sqrt(2)+1), x)
    # test that symbolic denominators are not processed
    r = 1 + sqrt(2)
    assert radsimp(x/r, symbolic=False) == -x*(-sqrt(2) + 1)
    assert radsimp(x/(y + r), symbolic=False) == x/(y + 1 + sqrt(2))
    assert radsimp(x/(y + r)/r, symbolic=False) == \
        -x*(-sqrt(2) + 1)/(y + 1 + sqrt(2))
    # issue 7408
    eq = sqrt(x)/sqrt(y)
    assert radsimp(eq) == umul(sqrt(x), sqrt(y), 1/y)
    assert radsimp(eq, symbolic=False) == eq
    # issue 7498
    assert radsimp(sqrt(x)/sqrt(y)**3) == umul(sqrt(x), sqrt(y**3), 1/y**3)
    # for coverage
    eq = sqrt(x)/y**2
    assert radsimp(eq) == eq
def test_radsimp_issue_3214():
    """Issue 3214: rationalize (c + I*p - s)/(c + I*p + s) for positive c, p."""
    c, p = symbols('c p', positive=True)
    surd = sqrt(c**2 - p**2)
    ratio = (c + I*p - surd)/(c + I*p + surd)
    expected = -I*(c + I*p - sqrt(c**2 - p**2))**2/(2*c*p)
    assert radsimp(ratio) == expected
def test_collect_1():
    """Collect with respect to a Symbol"""
    x, y, z, n = symbols('x,y,z,n')
    assert collect( x + y*x, x ) == x * (1 + y)
    assert collect( x + x**2, x ) == x + x**2
    assert collect( x**2 + y*x**2, x ) == (x**2)*(1 + y)
    assert collect( x**2 + y*x, x ) == x*y + x**2
    assert collect( 2*x**2 + y*x**2 + 3*x*y, [x] ) == x**2*(2 + y) + 3*x*y
    assert collect( 2*x**2 + y*x**2 + 3*x*y, [y] ) == 2*x**2 + y*(x**2 + 3*x)
    # collecting over a binomial expansion groups by powers of x
    assert collect( ((1 + y + x)**4).expand(), x) == ((1 + y)**4).expand() + \
        x*(4*(1 + y)**3).expand() + x**2*(6*(1 + y)**2).expand() + \
        x**3*(4*(1 + y)).expand() + x**4
    # symbols can be given as any iterable
    expr = x + y
    assert collect(expr, expr.free_symbols) == expr
def test_collect_2():
    """Collect with respect to a sum"""
    coeff1, coeff2, var = symbols('a,b,x')
    trig_sum = cos(var) + sin(var)
    expr = coeff1*trig_sum + coeff2*trig_sum
    assert collect(expr, sin(var) + cos(var)) == (coeff1 + coeff2)*trig_sum
def test_collect_3():
    """Collect with respect to a product"""
    a, b, c = symbols('a,b,c')
    f = Function('f')
    x, y, z, n = symbols('x,y,z,n')
    assert collect(-x/8 + x*y, -x) == x*(y - S(1)/8)
    # x*y does not match x*(y**2), so nothing is collected
    assert collect( 1 + x*(y**2), x*y ) == 1 + x*(y**2)
    assert collect( x*y + a*x*y, x*y) == x*y*(1 + a)
    assert collect( 1 + x*y + a*x*y, x*y) == 1 + x*y*(1 + a)
    assert collect(a*x*f(x) + b*(x*f(x)), x*f(x)) == x*(a + b)*f(x)
    assert collect(a*x*log(x) + b*(x*log(x)), x*log(x)) == x*(a + b)*log(x)
    assert collect(a*x**2*log(x)**2 + b*(x*log(x))**2, x*log(x)) == \
        x**2*log(x)**2*(a + b)
    # with respect to a product of three symbols
    assert collect(y*x*z + a*x*y*z, x*y*z) == (1 + a)*x*y*z
def test_collect_4():
    """Collect with respect to a power"""
    a, b, c, x = symbols('a,b,c,x')
    assert collect(a*x**c + b*x**c, x**c) == x**c*(a + b)
    # issue 6096: 2 stays with c (unless c is integer or x is positive)
    assert collect(a*x**(2*c) + b*x**(2*c), x**c) == x**(2*c)*(a + b)
def test_collect_5():
    """Collect with respect to a tuple"""
    a, x, y, z, n = symbols('a,x,y,z,n')
    # either grouping is acceptable since collection order is unspecified
    assert collect(x**2*y**4 + z*(x*y**2)**2 + z + a*z, [x*y**2, z]) in [
        z*(1 + a + x**2*y**4) + x**2*y**4,
        z*(1 + a) + x**2*y**4*(1 + z) ]
    assert collect((1 + (x + y) + (x + y)**2).expand(),
        [x, y]) == 1 + y + x*(1 + 2*y) + x**2 + y**2
def test_collect_D():
    """Collect with respect to a Derivative, with and without exact=True."""
    D = Derivative
    f = Function('f')
    x, a, b = symbols('x,a,b')
    fx = D(f(x), x)
    fxx = D(f(x), x, x)
    assert collect(a*fx + b*fx, fx) == (a + b)*fx
    assert collect(a*D(fx, x) + b*D(fx, x), fx) == (a + b)*D(fx, x)
    assert collect(a*fxx + b*fxx, fx) == (a + b)*D(fx, x)
    # issue 4784
    assert collect(5*f(x) + 3*fx, fx) == 5*f(x) + 3*fx
    assert collect(f(x) + f(x)*diff(f(x), x) + x*diff(f(x), x)*f(x), f(x).diff(x)) == \
        (x*f(x) + f(x))*D(f(x), x) + f(x)
    assert collect(f(x) + f(x)*diff(f(x), x) + x*diff(f(x), x)*f(x), f(x).diff(x), exact=True) == \
        (x*f(x) + f(x))*D(f(x), x) + f(x)
    assert collect(1/f(x) + 1/f(x)*diff(f(x), x) + x*diff(f(x), x)/f(x), f(x).diff(x), exact=True) == \
        (1/f(x) + x/f(x))*D(f(x), x) + 1/f(x)
def test_collect_func():
    """Collect a polynomial in x, optionally post-processing coefficients with factor
    or returning the coefficient mapping via evaluate=False."""
    f = ((x + a + 1)**3).expand()
    assert collect(f, x) == a**3 + 3*a**2 + 3*a + x**3 + x**2*(3*a + 3) + \
        x*(3*a**2 + 6*a + 3) + 1
    assert collect(f, x, factor) == x**3 + 3*x**2*(a + 1) + 3*x*(a + 1)**2 + \
        (a + 1)**3
    # evaluate=False returns {power-of-x: coefficient}
    assert collect(f, x, evaluate=False) == {
        S.One: a**3 + 3*a**2 + 3*a + 1,
        x: 3*a**2 + 6*a + 3, x**2: 3*a + 3,
        x**3: 1
    }
def test_collect_order():
    """Collect on expressions containing an Order term O(...)."""
    a, b, x, t = symbols('a,b,x,t')
    assert collect(t + t*x + t*x**2 + O(x**3), t) == t*(1 + x + x**2 + O(x**3))
    assert collect(t + t*x + x**2 + O(x**3), t) == \
        t*(1 + x + O(x**3)) + x**2 + O(x**3)
    # NOTE(review): c and d are not bound in this function — presumably
    # module-level symbols; confirm they are defined at import time.
    f = a*x + b*x + c*x**2 + d*x**2 + O(x**3)
    g = x*(a + b) + x**2*(c + d) + O(x**3)
    assert collect(f, x) == g
    assert collect(f, x, distribute_order_term=False) == g
    f = sin(a + b).series(b, 0, 10)
    assert collect(f, [sin(a), cos(a)]) == \
        sin(a)*cos(b).series(b, 0, 10) + cos(a)*sin(b).series(b, 0, 10)
    assert collect(f, [sin(a), cos(a)], distribute_order_term=False) == \
        sin(a)*cos(b).series(b, 0, 10).removeO() + \
        cos(a)*sin(b).series(b, 0, 10).removeO() + O(b**10)
def test_rcollect():
    """rcollect collects recursively; an expression without the symbol is unchanged."""
    assert rcollect((x**2*y + x*y + x + y)/(x + y), y) == \
        (x + y*(1 + x + x**2))/(x + y)
    assert rcollect(sqrt(-((x + 1)*(y + 1))), z) == sqrt(-((x + 1)*(y + 1)))
@XFAIL
def test_collect_func_xfail():
    # XXX: this test will pass when automatic constant distribution is removed (issue 4596)
    # NOTE(review): f and a are not bound here — presumably module-scope
    # leftovers; harmless while the test is XFAIL, but confirm before un-xfailing.
    assert collect(f, x, factor, evaluate=False) == {S.One: (a + 1)**3,
        x: 3*(a + 1)**2, x**2: 3*(a + 1), x**3: 1}
@XFAIL
def test_collect_issues():
    """Known failure: collect on an expanded quotient does not round-trip."""
    D = Derivative
    f = Function('f')
    e = (1 + x*D(f(x), x) + D(f(x), x))/f(x)
    assert collect(e.expand(), f(x).diff(x)) != e
def test_collect_D_0():
    """Collecting a second derivative by itself works."""
    D = Derivative
    f = Function('f')
    x, a, b = symbols('x,a,b')
    fxx = D(f(x), x, x)
    # collect does not distinguish nested derivatives, so it returns
    # -- (a + b)*D(D(f, x), x)
    assert collect(a*fxx + b*fxx, fxx) == (a + b)*fxx
def test_collect_Wild():
    """Collect with respect to functions with Wild argument"""
    a, b, x, y = symbols('a b x y')
    f = Function('f')
    w1 = Wild('.1')
    w2 = Wild('.2')
    assert collect(f(x) + a*f(x), f(w1)) == (1 + a)*f(x)
    # f(w1) (one argument) does not match f(x, y)
    assert collect(f(x, y) + a*f(x, y), f(w1)) == f(x, y) + a*f(x, y)
    assert collect(f(x, y) + a*f(x, y), f(w1, w2)) == (1 + a)*f(x, y)
    # f(w1, w1) only matches when both arguments are identical
    assert collect(f(x, y) + a*f(x, y), f(w1, w1)) == f(x, y) + a*f(x, y)
    assert collect(f(x, x) + a*f(x, x), f(w1, w1)) == (1 + a)*f(x, x)
    assert collect(a*(x + 1)**y + (x + 1)**y, w1**y) == (1 + a)*(x + 1)**y
    assert collect(a*(x + 1)**y + (x + 1)**y, w1**b) == \
        a*(x + 1)**y + (x + 1)**y
    assert collect(a*(x + 1)**y + (x + 1)**y, (x + 1)**w2) == \
        (1 + a)*(x + 1)**y
    assert collect(a*(x + 1)**y + (x + 1)**y, w1**w2) == (1 + a)*(x + 1)**y
def test_collect_const():
    # coverage not provided by above tests
    assert collect_const(2*sqrt(3) + 4*a*sqrt(5)) == \
        2*(2*sqrt(5)*a + sqrt(3))  # let the primitive reabsorb
    assert collect_const(2*sqrt(3) + 4*a*sqrt(5), sqrt(3)) == \
        2*sqrt(3) + 4*a*sqrt(5)
    assert collect_const(sqrt(2)*(1 + sqrt(2)) + sqrt(3) + x*sqrt(2)) == \
        sqrt(2)*(x + 1 + sqrt(2)) + sqrt(3)
    # issue 5290
    assert collect_const(2*x + 2*y + 1, 2) == \
        collect_const(2*x + 2*y + 1) == \
        Add(S(1), Mul(2, x + y, evaluate=False), evaluate=False)
    assert collect_const(-y - z) == Mul(-1, y + z, evaluate=False)
    assert collect_const(2*x - 2*y - 2*z, 2) == \
        Mul(2, x - y - z, evaluate=False)
    # collecting by -2 keeps the positively-signed term separate
    assert collect_const(2*x - 2*y - 2*z, -2) == \
        _unevaluated_Add(2*x, Mul(-2, y + z, evaluate=False))
    # this is why the content_primitive is used
    eq = (sqrt(15 + 5*sqrt(2))*x + sqrt(3 + sqrt(2))*y)*2
    assert collect_sqrt(eq + 2) == \
        2*sqrt(sqrt(2) + 3)*(sqrt(5)*x + y) + 2
def test_issue_6097():
    """Float exponent multiples (2.0*x) still collect over base**x (issue 6097)."""
    for base in (y, 2):
        collected = collect(a*base**(2.0*x) + b*base**(2.0*x), base**x)
        assert collected == base**(2.0*x)*(a + b)
def test_fraction_expand():
    """expand(frac=True) keeps a single fraction; plain expand splits terms."""
    expr = (x + y)*y/x
    over_common_denominator = (x*y + y**2)/x
    assert expr.expand(frac=True) == fraction_expand(expr) == over_common_denominator
    assert expr.expand() == y + y**2/x
def test_fraction():
    """fraction() splits an expression into (numerator, denominator)."""
    x, y, z = map(Symbol, 'xyz')
    A = Symbol('A', commutative=False)
    assert fraction(Rational(1, 2)) == (1, 2)
    assert fraction(x) == (x, 1)
    assert fraction(1/x) == (1, x)
    assert fraction(x/y) == (x, y)
    assert fraction(x/2) == (x, 2)
    assert fraction(x*y/z) == (x*y, z)
    assert fraction(x/(y*z)) == (x, y*z)
    assert fraction(1/y**2) == (1, y**2)
    assert fraction(x/y**2) == (x, y**2)
    assert fraction((x**2 + 1)/y) == (x**2 + 1, y)
    assert fraction(x*(y + 1)/y**7) == (x*(y + 1), y**7)
    # exact=True keeps exp(-x) in the numerator instead of inverting it
    assert fraction(exp(-x), exact=True) == (exp(-x), 1)
    # noncommutative factors stay in the numerator
    assert fraction(x*A/y) == (x*A, y)
    assert fraction(x*A**-1/y) == (x*A**-1, y)
    n = symbols('n', negative=True)
    assert fraction(exp(n)) == (1, exp(-n))
    assert fraction(exp(-n)) == (exp(-n), 1)
def test_issue_5615():
    """Collecting by a quotient target and a symbol leaves the expression unchanged."""
    aA, Re, a, b, D = symbols('aA Re a b D')
    e = ((D**3*a + b*aA**3)/Re).expand()
    assert collect(e, [aA**3/Re, a]) == e
def test_issue_5933():
    """radsimp of a pentagon centroid coordinate must not produce a near-zero denominator."""
    from sympy import Polygon, RegularPolygon, denom
    x = Polygon(*RegularPolygon((0, 0), 1, 5).vertices).centroid.x
    assert abs(denom(x).n()) > 1e-12
    assert abs(denom(radsimp(x))) > 1e-12  # in case simplify didn't handle it
| bsd-3-clause |
beacloudgenius/edx-platform | common/lib/xmodule/xmodule/modulestore/split_mongo/split.py | 4 | 146687 | """
Provides full versioning CRUD and representation for collections of xblocks (e.g., courses, modules, etc).
Representation:
* course_index: a dictionary:
** '_id': a unique id which cannot change,
** 'org': the org's id. Only used for searching not identity,
** 'course': the course's catalog number
** 'run': the course's run id,
** 'edited_by': user_id of user who created the original entry,
** 'edited_on': the datetime of the original creation,
** 'versions': versions_dict: {branch_id: structure_id, ...}
** 'search_targets': a dict of search key and value. For example, wiki_slug. Add any fields whose edits
should change the search targets to SplitMongoModuleStore.SEARCH_TARGET dict
* structure:
** '_id': an ObjectId (guid),
** 'root': BlockKey (the block_type and block_id of the root block in the 'blocks' dictionary)
** 'previous_version': the structure from which this one was derived. For published courses, this
points to the previously published version of the structure not the draft published to this.
** 'original_version': the original structure id in the previous_version relation. Is a pseudo object
identifier enabling quick determination if 2 structures have any shared history,
** 'edited_by': user_id of the user whose change caused the creation of this structure version,
** 'edited_on': the datetime for the change causing this creation of this structure version,
** 'blocks': dictionary of xblocks in this structure:
*** BlockKey: key mapping to each BlockData:
*** BlockData: object containing the following attributes:
**** 'block_type': the xblock type id
**** 'definition': the db id of the record containing the content payload for this xblock
**** 'fields': the Scope.settings and children field values
***** 'children': This is stored as a list of (block_type, block_id) pairs
**** 'defaults': Scope.settings default values copied from a template block (used e.g. when
blocks are copied from a library to a course)
**** 'edit_info': EditInfo object:
***** 'edited_on': when was this xblock's fields last changed (will be edited_on value of
update_version structure)
***** 'edited_by': user_id for who changed this xblock last (will be edited_by value of
update_version structure)
***** 'update_version': the guid for the structure where this xblock got its current field
values. This may point to a structure not in this structure's history (e.g., to a draft
branch from which this version was published.)
***** 'previous_version': the guid for the structure which previously changed this xblock
(will be the previous value of update_version; so, may point to a structure not in this
structure's history.)
***** 'source_version': the guid for the structure was copied/published into this block
* definition: shared content with revision history for xblock content fields
** '_id': definition_id (guid),
** 'block_type': xblock type id
** 'fields': scope.content (and possibly other) field values.
** 'edit_info': dictionary:
*** 'edited_by': user_id whose edit caused this version of the definition,
*** 'edited_on': datetime of the change causing this version
*** 'previous_version': the definition_id of the previous version of this definition
*** 'original_version': definition_id of the root of the previous version relation on this
definition. Acts as a pseudo-object identifier.
"""
import copy
import datetime
import hashlib
import logging
from contracts import contract, new_contract
from importlib import import_module
from mongodb_proxy import autoretry_read
from path import path
from pytz import UTC
from bson.objectid import ObjectId
from xblock.core import XBlock
from xblock.fields import Scope, Reference, ReferenceList, ReferenceValueDict
from xmodule.errortracker import null_error_tracker
from opaque_keys.edx.locator import (
BlockUsageLocator, DefinitionLocator, CourseLocator, LibraryLocator, VersionTree, LocalId,
)
from xmodule.modulestore.exceptions import InsufficientSpecificationError, VersionConflictError, DuplicateItemError, \
DuplicateCourseError
from xmodule.modulestore import (
inheritance, ModuleStoreWriteBase, ModuleStoreEnum,
BulkOpsRecord, BulkOperationsMixin, SortedAssetList, BlockData
)
from ..exceptions import ItemNotFoundError
from .caching_descriptor_system import CachingDescriptorSystem
from xmodule.modulestore.split_mongo.mongo_connection import MongoConnection, DuplicateKeyError
from xmodule.modulestore.split_mongo import BlockKey, CourseEnvelope
from xmodule.error_module import ErrorDescriptor
from collections import defaultdict
from types import NoneType
from xmodule.assetstore import AssetMetadata
log = logging.getLogger(__name__)
# ==============================================================================
#
# Known issue:
# Inheritance for cached kvs doesn't work on edits. Use case.
# 1) attribute foo is inheritable
# 2) g.children = [p], p.children = [a]
# 3) g.foo = 1 on load
# 4) if g.foo > 0, if p.foo > 0, if a.foo > 0 all eval True
# 5) p.foo = -1
# 6) g.foo > 0, p.foo <= 0 all eval True BUT
# 7) BUG: a.foo > 0 still evals True but should be False
# 8) reread and everything works right
# 9) p.del(foo), p.foo > 0 is True! works
# 10) BUG: a.foo < 0!
# Local fix wont' permanently work b/c xblock may cache a.foo...
#
# ==============================================================================
# When blacklists are this, all children should be excluded
EXCLUDE_ALL = '*'

# Register these types with PyContracts so @contract annotations in this
# module can refer to them by name.
new_contract('BlockUsageLocator', BlockUsageLocator)
new_contract('BlockKey', BlockKey)
new_contract('XBlock', XBlock)
class SplitBulkWriteRecord(BulkOpsRecord):
    """
    Accumulates the index, structure, and definition edits made to one course
    during an active bulk operation, so they can be flushed to the database in
    a single pass when the outermost bulk operation ends.
    """
    def __init__(self):
        super(SplitBulkWriteRecord, self).__init__()
        # course index document as of the start of the bulk op (None if none existed)
        self.initial_index = None
        # current, possibly edited, course index document
        self.index = None
        # structure documents cached/edited during the bulk op, keyed by _id
        self.structures = {}
        # ids of structures known to already exist in the db (no write needed)
        self.structures_in_db = set()

        # dict(version_guid, dict(BlockKey, module))
        self.modules = defaultdict(dict)
        # definition documents cached/edited during the bulk op, keyed by _id
        self.definitions = {}
        # ids of definitions known to already exist in the db
        self.definitions_in_db = set()

    # TODO: This needs to track which branches have actually been modified/versioned,
    # so that copying one branch to another doesn't update the original branch.
    @property
    def dirty_branches(self):
        """
        Return a list of which branch version ids differ from what was stored
        in the database at the beginning of this bulk operation.
        """
        # If no course index has been set, then no branches have changed
        if self.index is None:
            return []

        # If there was no index in the database to start with, then all branches
        # are dirty by definition
        if self.initial_index is None:
            return self.index.get('versions', {}).keys()

        # Return branches whose ids differ between self.index and self.initial_index
        return [
            branch
            for branch, _id
            in self.index.get('versions', {}).items()
            if self.initial_index.get('versions', {}).get(branch) != _id
        ]

    def structure_for_branch(self, branch):
        # Structure currently associated with ``branch`` per self.index,
        # or None if the branch/structure isn't cached.
        return self.structures.get(self.index.get('versions', {}).get(branch))

    def set_structure_for_branch(self, branch, structure):
        # Point the branch at the structure and cache the structure itself.
        if self.index is not None:
            self.index.setdefault('versions', {})[branch] = structure['_id']
        self.structures[structure['_id']] = structure

    def __repr__(self):
        return u"SplitBulkWriteRecord<{!r}, {!r}, {!r}, {!r}, {!r}>".format(
            self._active_count,
            self.initial_index,
            self.index,
            self.structures,
            self.structures_in_db,
        )
class SplitBulkWriteMixin(BulkOperationsMixin):
    """
    This implements the :meth:`bulk_operations` modulestore semantics for the :class:`SplitMongoModuleStore`.

    In particular, it implements :meth:`_begin_bulk_operation` and
    :meth:`_end_bulk_operation` to provide the external interface, and then exposes a set of methods
    for interacting with course_indexes and structures that can be used by :class:`SplitMongoModuleStore`.

    Internally, this mixin records the set of all active bulk operations (keyed on the active course),
    and only writes those values to ``self.mongo_connection`` when :meth:`_end_bulk_operation` is called.
    If a bulk write operation isn't active, then the changes are immediately written to the underlying
    mongo_connection.
    """
    # record type instantiated per-course by BulkOperationsMixin
    _bulk_ops_record_type = SplitBulkWriteRecord
def _get_bulk_ops_record(self, course_key, ignore_case=False):
    """
    Return the :class:`.SplitBulkWriteRecord` for this course.

    Records are keyed on the course key with branch/version stripped, so
    all branches of a course share one record.
    """
    # handle split specific things and defer to super otherwise
    if course_key is None:
        # a throwaway record: never active, never persisted
        return self._bulk_ops_record_type()
    if not isinstance(course_key, (CourseLocator, LibraryLocator)):
        raise TypeError(u'{!r} is not a CourseLocator or LibraryLocator'.format(course_key))
    # handle version_guid based retrieval locally
    if course_key.org is None or course_key.course is None or course_key.run is None:
        return self._active_bulk_ops.records[
            course_key.replace(org=None, course=None, run=None, branch=None)
        ]

    # handle ignore case and general use
    return super(SplitBulkWriteMixin, self)._get_bulk_ops_record(
        course_key.replace(branch=None, version_guid=None), ignore_case
    )
def _clear_bulk_ops_record(self, course_key):
    """
    Clear the record for this course

    The key normalization here mirrors ``_get_bulk_ops_record`` so the same
    record is removed that was created.
    """
    if not isinstance(course_key, (CourseLocator, LibraryLocator)):
        raise TypeError('{!r} is not a CourseLocator or LibraryLocator'.format(course_key))

    if course_key.org and course_key.course and course_key.run:
        del self._active_bulk_ops.records[course_key.replace(branch=None, version_guid=None)]
    else:
        # version_guid-based key: strip identity fields the same way it was stored
        del self._active_bulk_ops.records[
            course_key.replace(org=None, course=None, run=None, branch=None)
        ]
def _start_outermost_bulk_operation(self, bulk_write_record, course_key):
    """
    Begin a bulk write operation on course_key.

    Snapshots the current course index so that, at the end of the bulk op,
    we can tell whether (and what) to write back.
    """
    bulk_write_record.initial_index = self.db_connection.get_course_index(course_key)
    # Ensure that any edits to the index don't pollute the initial_index
    bulk_write_record.index = copy.deepcopy(bulk_write_record.initial_index)
def _end_outermost_bulk_operation(self, bulk_write_record, structure_key, emit_signals=True):
    """
    End the active bulk write operation on structure_key (course or library key).

    Writes any new structures and definitions first, then the updated course
    index (so the index never points at documents that don't exist yet), and
    finally emits publish/update signals if anything changed.
    """
    dirty = False

    # If the content is dirty, then update the database
    for _id in bulk_write_record.structures.viewkeys() - bulk_write_record.structures_in_db:
        dirty = True

        try:
            self.db_connection.insert_structure(bulk_write_record.structures[_id])
        except DuplicateKeyError:
            # We may not have looked up this structure inside this bulk operation, and thus
            # didn't realize that it was already in the database. That's OK, the store is
            # append only, so if it's already been written, we can just keep going.
            log.debug("Attempted to insert duplicate structure %s", _id)

    for _id in bulk_write_record.definitions.viewkeys() - bulk_write_record.definitions_in_db:
        dirty = True

        try:
            self.db_connection.insert_definition(bulk_write_record.definitions[_id])
        except DuplicateKeyError:
            # We may not have looked up this definition inside this bulk operation, and thus
            # didn't realize that it was already in the database. That's OK, the store is
            # append only, so if it's already been written, we can just keep going.
            log.debug("Attempted to insert duplicate definition %s", _id)

    if bulk_write_record.index is not None and bulk_write_record.index != bulk_write_record.initial_index:
        dirty = True

        if bulk_write_record.initial_index is None:
            self.db_connection.insert_course_index(bulk_write_record.index)
        else:
            self.db_connection.update_course_index(bulk_write_record.index, from_index=bulk_write_record.initial_index)

    if dirty and emit_signals:
        self.send_bulk_published_signal(bulk_write_record, structure_key)
        self.send_bulk_library_updated_signal(bulk_write_record, structure_key)
def get_course_index(self, course_key, ignore_case=False):
    """
    Return the index for course_key.

    Prefers the in-flight copy held by an active bulk operation; otherwise
    reads straight from the database.
    """
    if not self._is_in_bulk_operation(course_key, ignore_case):
        return self.db_connection.get_course_index(course_key, ignore_case)
    return self._get_bulk_ops_record(course_key, ignore_case).index
def delete_course_index(self, course_key):
    """
    Delete the course index from cache and the db

    Any pending bulk-operation record is discarded (its buffered edits are
    dropped, not flushed) before the db index is removed.
    """
    if self._is_in_bulk_operation(course_key, False):
        self._clear_bulk_ops_record(course_key)

    self.db_connection.delete_course_index(course_key)
def insert_course_index(self, course_key, index_entry):
    """
    Create the course index entry for ``course_key``.

    If a bulk operation is active, the write is deferred until it completes;
    otherwise the entry is inserted immediately.
    """
    bulk_write_record = self._get_bulk_ops_record(course_key)
    if bulk_write_record.active:
        bulk_write_record.index = index_entry
    else:
        self.db_connection.insert_course_index(index_entry)
def update_course_index(self, course_key, updated_index_entry):
    """
    Change the given course's index entry.

    Note, this operation can be dangerous and break running courses.

    Does not return anything useful.
    """
    bulk_write_record = self._get_bulk_ops_record(course_key)
    if bulk_write_record.active:
        # buffered: flushed by _end_outermost_bulk_operation
        bulk_write_record.index = updated_index_entry
    else:
        self.db_connection.update_course_index(updated_index_entry)
def get_structure(self, course_key, version_guid):
    """
    Fetch the structure document for ``version_guid``, preferring (and
    populating) the active bulk operation's cache when one exists.
    """
    bulk_write_record = self._get_bulk_ops_record(course_key)
    if bulk_write_record.active:
        structure = bulk_write_record.structures.get(version_guid)

        # The structure hasn't been loaded from the db yet, so load it
        if structure is None:
            structure = self.db_connection.get_structure(version_guid)
            bulk_write_record.structures[version_guid] = structure
            if structure is not None:
                # mark as persisted so end-of-bulk flush won't re-insert it
                bulk_write_record.structures_in_db.add(version_guid)

        return structure
    else:
        # cast string to ObjectId if necessary
        version_guid = course_key.as_object_id(version_guid)
        return self.db_connection.get_structure(version_guid)
def update_structure(self, course_key, structure):
    """
    Update a course structure, respecting the current bulk operation status
    (no data will be written to the database if a bulk operation is active.)
    """
    # drop any runtime cached against the old copy of this structure
    self._clear_cache(structure['_id'])
    bulk_write_record = self._get_bulk_ops_record(course_key)
    if bulk_write_record.active:
        bulk_write_record.structures[structure['_id']] = structure
    else:
        self.db_connection.insert_structure(structure)
def get_cached_block(self, course_key, version_guid, block_id):
    """
    Return the module cached for ``block_id`` by the active bulk operation,
    or None when there is no active bulk operation or no cached entry.
    Callers are expected to load and cache uncached modules themselves.
    """
    record = self._get_bulk_ops_record(course_key)
    if not record.active:
        return None
    return record.modules[version_guid].get(block_id)
def cache_block(self, course_key, version_guid, block_key, block):
    """
    Counterpart to :meth:`get_cached_block`: store ``block`` in the active
    bulk operation's module cache. No-op (and returns nothing) when no bulk
    operation is active.
    """
    record = self._get_bulk_ops_record(course_key)
    if record.active:
        record.modules[version_guid][block_key] = block
def decache_block(self, course_key, version_guid, block_key):
    """
    Remove ``block_key`` from the active bulk operation's module cache, if it
    is present. Write operations that bypass block objects must call this so
    stale cached blocks aren't served. Returns nothing.
    """
    record = self._get_bulk_ops_record(course_key)
    if record.active:
        # pop with default: absent keys are silently ignored
        record.modules[version_guid].pop(block_key, None)
def get_definition(self, course_key, definition_guid):
    """
    Retrieve a single definition by id, respecting the active bulk operation
    on course_key.

    Args:
        course_key (:class:`.CourseKey`): The course being operated on
        definition_guid (str or ObjectID): The id of the definition to load
    """
    bulk_write_record = self._get_bulk_ops_record(course_key)
    if bulk_write_record.active:
        definition = bulk_write_record.definitions.get(definition_guid)

        # The definition hasn't been loaded from the db yet, so load it
        if definition is None:
            definition = self.db_connection.get_definition(definition_guid)
            bulk_write_record.definitions[definition_guid] = definition
            if definition is not None:
                # mark as persisted so end-of-bulk flush won't re-insert it
                bulk_write_record.definitions_in_db.add(definition_guid)

        return definition
    else:
        # cast string to ObjectId if necessary
        definition_guid = course_key.as_object_id(definition_guid)
        return self.db_connection.get_definition(definition_guid)
def get_definitions(self, course_key, ids):
    """
    Return all definitions that specified in ``ids``.

    If a definition with the same id is in both the cache and the database,
    the cached version will be preferred.

    Arguments:
        course_key (:class:`.CourseKey`): The course that these definitions are being loaded
            for (to respect bulk operations).
        ids (list): A list of definition ids
    """
    definitions = []
    ids = set(ids)

    bulk_write_record = self._get_bulk_ops_record(course_key)
    if bulk_write_record.active:
        # Only query for the definitions that aren't already cached.
        for definition in bulk_write_record.definitions.values():
            definition_id = definition.get('_id')
            if definition_id in ids:
                ids.remove(definition_id)
                definitions.append(definition)

    if len(ids):
        # Query the db for the definitions.
        defs_from_db = self.db_connection.get_definitions(list(ids))
        # Add the retrieved definitions to the cache.
        bulk_write_record.definitions.update({d.get('_id'): d for d in defs_from_db})
        definitions.extend(defs_from_db)
    return definitions
def update_definition(self, course_key, definition):
    """
    Update a definition, respecting the current bulk operation status
    (no data will be written to the database if a bulk operation is active.)
    """
    bulk_write_record = self._get_bulk_ops_record(course_key)
    if bulk_write_record.active:
        # buffered: flushed by _end_outermost_bulk_operation
        bulk_write_record.definitions[definition['_id']] = definition
    else:
        self.db_connection.insert_definition(definition)
def version_structure(self, course_key, structure, user_id):
    """
    Copy the structure and update the history info (edited_by, edited_on, previous_version)

    Raises InsufficientSpecificationError when course_key has no branch, since
    a new version must be attached to a specific branch.
    """
    if course_key.branch is None:
        raise InsufficientSpecificationError(course_key)

    bulk_write_record = self._get_bulk_ops_record(course_key)

    # If we have an active bulk write, and it's already been edited, then just use that structure
    if bulk_write_record.active and course_key.branch in bulk_write_record.dirty_branches:
        return bulk_write_record.structure_for_branch(course_key.branch)

    # Otherwise, make a new structure
    new_structure = copy.deepcopy(structure)
    new_structure['_id'] = ObjectId()
    new_structure['previous_version'] = structure['_id']
    new_structure['edited_by'] = user_id
    new_structure['edited_on'] = datetime.datetime.now(UTC)
    new_structure['schema_version'] = self.SCHEMA_VERSION

    # If we're in a bulk write, update the structure used there, and mark it as dirty
    if bulk_write_record.active:
        bulk_write_record.set_structure_for_branch(course_key.branch, new_structure)

    return new_structure
def version_block(self, block_data, user_id, update_version):
    """
    Update the block_data object based on it having been edited.

    No-op if the block is already at ``update_version``; otherwise shifts the
    current update_version into previous_version and stamps editor/time.
    """
    if block_data.edit_info.update_version == update_version:
        return

    # preserved across the edit (mutating edit_info fields below)
    original_usage = block_data.edit_info.original_usage
    original_usage_version = block_data.edit_info.original_usage_version
    block_data.edit_info.edited_on = datetime.datetime.now(UTC)
    block_data.edit_info.edited_by = user_id
    block_data.edit_info.previous_version = block_data.edit_info.update_version
    block_data.edit_info.update_version = update_version
    if original_usage:
        block_data.edit_info.original_usage = original_usage
        block_data.edit_info.original_usage_version = original_usage_version
def find_matching_course_indexes(self, branch=None, search_targets=None, org_target=None):
    """
    Find the course_indexes which have the specified branch and search_targets. An optional org_target
    can be specified to apply an ORG filter to return only the courses that are part of
    that ORG.

    Returns:
        a Cursor if there are no changes in flight or a list if some have changed in current bulk op
    """
    indexes = self.db_connection.find_matching_course_indexes(branch, search_targets, org_target)

    def _replace_or_append_index(altered_index):
        """
        If the index is already in indexes, replace it. Otherwise, append it.
        """
        for index, existing in enumerate(indexes):
            if all(existing[attr] == altered_index[attr] for attr in ['org', 'course', 'run']):
                indexes[index] = altered_index
                return
        indexes.append(altered_index)

    # add any being built but not yet persisted or in the process of being updated
    for _, record in self._active_records:
        if branch and branch not in record.index.get('versions', {}):
            continue

        if search_targets:
            # skip records that lack any requested search target or whose value differs
            if any(
                'search_targets' not in record.index or
                field not in record.index['search_targets'] or
                record.index['search_targets'][field] != value
                for field, value in search_targets.iteritems()
            ):
                continue

        # if we've specified a filter by org,
        # make sure we've honored that filter when
        # integrating in-transit records
        if org_target:
            if record.index['org'] != org_target:
                continue

        if not hasattr(indexes, 'append'):  # Just in time conversion to list from cursor
            indexes = list(indexes)

        _replace_or_append_index(record.index)

    return indexes
def find_structures_by_id(self, ids):
    """
    Return all structures that specified in ``ids``.

    If a structure with the same id is in both the cache and the database,
    the cached version will be preferred.

    Arguments:
        ids (list): A list of structure ids
    """
    structures = []
    ids = set(ids)

    # satisfy from in-flight bulk-op caches first, removing found ids
    for _, record in self._active_records:
        for structure in record.structures.values():
            structure_id = structure.get('_id')
            if structure_id in ids:
                ids.remove(structure_id)
                structures.append(structure)

    # fetch the remainder from the database
    structures.extend(self.db_connection.find_structures_by_id(list(ids)))
    return structures
def find_structures_derived_from(self, ids):
    """
    Return all structures that were immediately derived from a structure listed in ``ids``.

    Cached (in-flight) structures take precedence over db copies with the
    same id.

    Arguments:
        ids (list): A list of structure ids
    """
    found_structure_ids = set()
    structures = []

    for _, record in self._active_records:
        for structure in record.structures.values():
            if structure.get('previous_version') in ids:
                structures.append(structure)
                if '_id' in structure:
                    found_structure_ids.add(structure['_id'])

    # db results, excluding any ids already supplied by the cache
    structures.extend(
        structure
        for structure in self.db_connection.find_structures_derived_from(ids)
        if structure['_id'] not in found_structure_ids
    )
    return structures
def find_ancestor_structures(self, original_version, block_key):
    """
    Find all structures that originated from ``original_version`` that contain ``block_key``.

    Any structure found in the cache will be preferred to a structure with the same id from the database.

    Arguments:
        original_version (str or ObjectID): The id of a structure
        block_key (BlockKey): The id of the block in question
    """
    found_structure_ids = set()
    structures = []

    for _, record in self._active_records:
        for structure in record.structures.values():
            if 'original_version' not in structure:
                continue

            if structure['original_version'] != original_version:
                continue

            if block_key not in structure.get('blocks', {}):
                continue

            # only count blocks that carry versioning info
            if 'update_version' not in structure['blocks'][block_key].get('edit_info', {}):
                continue

            structures.append(structure)
            found_structure_ids.add(structure['_id'])

    # db results, excluding any ids already supplied by the cache
    structures.extend(
        structure
        for structure in self.db_connection.find_ancestor_structures(original_version, block_key)
        if structure['_id'] not in found_structure_ids
    )
    return structures
class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
    """
    A Mongodb backed ModuleStore supporting versions, inheritance,
    and sharing.
    """

    # version stamped onto newly created structures
    SCHEMA_VERSION = 1

    # a list of field names to store in course index search_targets. Note, this will
    # only record one value per key. If branches disagree, the last one set wins.
    # It won't recompute the value on operations such as update_course_index (e.g., to revert to a prev
    # version) but those functions will have an optional arg for setting these.
    SEARCH_TARGET_DICT = ['wiki_slug']
def __init__(self, contentstore, doc_store_config, fs_root, render_template,
             default_class=None,
             error_tracker=null_error_tracker,
             i18n_service=None, fs_service=None, user_service=None,
             services=None, signal_handler=None, **kwargs):
    """
    :param doc_store_config: must have a host, db, and collection entries. Other common entries: port, tz_aware.
    """
    super(SplitMongoModuleStore, self).__init__(contentstore, **kwargs)

    self.db_connection = MongoConnection(**doc_store_config)
    self.db = self.db_connection.database

    if default_class is not None:
        # resolve "package.module.ClassName" to the actual class object
        module_path, __, class_name = default_class.rpartition('.')
        class_ = getattr(import_module(module_path), class_name)
        self.default_class = class_
    else:
        self.default_class = None
    self.fs_root = path(fs_root)
    self.error_tracker = error_tracker
    self.render_template = render_template
    # optional XBlock runtime services, keyed by service name
    self.services = services or {}
    if i18n_service is not None:
        self.services["i18n"] = i18n_service

    if fs_service is not None:
        self.services["fs"] = fs_service

    if user_service is not None:
        self.services["user"] = user_service

    self.signal_handler = signal_handler
def close_connections(self):
    """
    Closes any open connections to the underlying databases
    """
    self.db.connection.close()
def mongo_wire_version(self):
    """
    Returns the wire version for mongo. Only used by unit tests which instrument the connection.
    """
    return self.db.connection.max_wire_version
def _drop_database(self):
    """
    A destructive operation to drop the underlying database and close all connections.
    Intended to be used by test code for cleanup.
    """
    # drop the assets
    super(SplitMongoModuleStore, self)._drop_database()

    connection = self.db.connection
    connection.drop_database(self.db.name)
    connection.close()
def cache_items(self, system, base_block_ids, course_key, depth=0, lazy=True):
    """
    Handles caching of items once inheritance and any other one time
    per course per fetch operations are done.

    Arguments:
        system: a CachingDescriptorSystem
        base_block_ids: list of BlockIds to fetch
        course_key: the destination course providing the context
        depth: how deep below these to prefetch
        lazy: whether to load definitions now or later
    """
    # This is a read path: suppress signal emission from the bulk operation.
    with self.bulk_operations(course_key, emit_signals=False):
        new_module_data = {}
        # Gather each requested block plus its descendants down to `depth`.
        for block_id in base_block_ids:
            new_module_data = self.descendants(
                system.course_entry.structure['blocks'],
                block_id,
                depth,
                new_module_data
            )

        # This method supports lazy loading, where the descendent definitions aren't loaded
        # until they're actually needed.
        if not lazy:
            # Non-lazy loading: Load all descendants by id.
            descendent_definitions = self.get_definitions(
                course_key,
                [
                    block.definition
                    for block in new_module_data.itervalues()
                ]
            )
            # Turn definitions into a map.
            definitions = {definition['_id']: definition
                           for definition in descendent_definitions}

            # Inject the fetched definition fields into each cached block.
            for block in new_module_data.itervalues():
                if block.definition in definitions:
                    definition = definitions[block.definition]
                    # convert_fields gets done later in the runtime's xblock_from_json
                    block.fields.update(definition.get('fields'))
                    block.definition_loaded = True

        system.module_data.update(new_module_data)
        return system.module_data
@contract(course_entry=CourseEnvelope, block_keys="list(BlockKey)", depth="int | None")
def _load_items(self, course_entry, block_keys, depth=0, **kwargs):
    """
    Load & cache the given blocks from the course. May return the blocks in any order.

    Load the definitions into each block if lazy is in kwargs and is False;
    otherwise, do not load the definitions - they'll be loaded later when needed.
    """
    # Reuse the per-request runtime for this structure version when one exists.
    runtime = self._get_cache(course_entry.structure['_id'])
    if runtime is None:
        # 'lazy' is only consumed on a cache miss; it configures the new runtime.
        lazy = kwargs.pop('lazy', True)
        runtime = self.create_runtime(course_entry, lazy)
        self._add_cache(course_entry.structure['_id'], runtime)
        # Pre-warm the new runtime's module cache with the requested blocks.
        self.cache_items(runtime, block_keys, course_entry.course_key, depth, lazy)
    return [runtime.load_item(block_key, course_entry, **kwargs) for block_key in block_keys]
def _get_cache(self, course_version_guid):
    """
    Return the cached descriptor system for the given structure version, or
    None when request-level caching is disabled or nothing is cached yet.

    :param course_version_guid: the structure version id used as the cache key
    """
    if self.request_cache is None:
        return None
    course_cache = self.request_cache.data.setdefault('course_cache', {})
    return course_cache.get(course_version_guid)
def _add_cache(self, course_version_guid, system):
    """
    Remember ``system`` as the descriptor cache entry for this structure
    version. Returns the system for caller convenience.

    :param course_version_guid: the structure version id used as the cache key
    :param system: the descriptor system to cache
    """
    if self.request_cache is not None:
        course_cache = self.request_cache.data.setdefault('course_cache', {})
        course_cache[course_version_guid] = system
    return system
def _clear_cache(self, course_version_guid=None):
    """
    Drop the whole descriptor cache, or just one entry.

    Should only be used by testing or something which implements transactional
    boundary semantics.

    :param course_version_guid: if provided, clear only this entry
    """
    if self.request_cache is None:
        return
    if not course_version_guid:
        # No specific entry requested: wipe the whole cache.
        self.request_cache.data['course_cache'] = {}
        return
    course_cache = self.request_cache.data.setdefault('course_cache', {})
    try:
        del course_cache[course_version_guid]
    except KeyError:
        # already absent — nothing to do
        pass
def _lookup_course(self, course_key, head_validation=True):
    """
    Decode the locator into the right series of db access. Does not
    return the CourseDescriptor! It returns the actual db json from
    structures.

    Semantics: if course id and branch given, then it will get that branch. If
    also give a version_guid, it will see if the current head of that branch == that guid. If not
    it raises VersionConflictError (the version now differs from what it was when you got your
    reference) unless you specify head_validation = False, in which case it will return the
    revision (if specified) by the course_key.

    :param course_key: any subclass of CourseLocator
    """
    if not course_key.version_guid:
        # Without an explicit version there is nothing to bypass validation for.
        head_validation = True
    if head_validation and course_key.org and course_key.course and course_key.run:
        if course_key.branch is None:
            raise InsufficientSpecificationError(course_key)

        # use the course id
        index = self.get_course_index(course_key)

        if index is None:
            raise ItemNotFoundError(course_key)
        if course_key.branch not in index['versions']:
            raise ItemNotFoundError(course_key)

        version_guid = index['versions'][course_key.branch]

        if course_key.version_guid is not None and version_guid != course_key.version_guid:
            # This may be a bit too touchy but it's hard to infer intent
            raise VersionConflictError(course_key, version_guid)

    elif course_key.version_guid is None:
        raise InsufficientSpecificationError(course_key)
    else:
        # TODO should this raise an exception if branch was provided?
        version_guid = course_key.version_guid

    entry = self.get_structure(course_key, version_guid)
    if entry is None:
        raise ItemNotFoundError('Structure: {}'.format(version_guid))

    # b/c more than one course can use same structure, the 'org', 'course',
    # 'run', and 'branch' are not intrinsic to structure
    # and the one assoc'd w/ it by another fetch may not be the one relevant to this fetch; so,
    # add it in the envelope for the structure.
    return CourseEnvelope(course_key.replace(version_guid=version_guid), entry)
def _get_structures_for_branch(self, branch, **kwargs):
    """
    Internal generator yielding ``(structure, course_index)`` pairs for every
    course index on ``branch``.

    An 'org' kwarg restricts the result to indexes belonging to that org.
    """
    matching_indexes = self.find_matching_course_indexes(
        branch,
        search_targets=None,
        org_target=kwargs.get('org')
    )

    # Record each index's head version guid, and remember which index it
    # came from so the structure can be paired back up after the fetch.
    version_guids = []
    id_version_map = {}
    for course_index in matching_indexes:
        head_guid = course_index['versions'][branch]
        version_guids.append(head_guid)
        id_version_map[head_guid] = course_index

    if version_guids:
        for entry in self.find_structures_by_id(version_guids):
            yield entry, id_version_map[entry['_id']]
def _get_structures_for_branch_and_locator(self, branch, locator_factory, **kwargs):
    """
    Internal helper returning the loaded root block of every structure on
    ``branch``.

    :param str branch: Branch to fetch structures from
    :param type locator_factory: Factory to create locator from structure info and branch
    """
    result = []
    for entry, structure_info in self._get_structures_for_branch(branch, **kwargs):
        envelope = CourseEnvelope(locator_factory(structure_info, branch), entry)
        loaded = self._load_items(envelope, [entry['root']], depth=0, **kwargs)
        # Skip structures whose root block failed to load.
        if not isinstance(loaded[0], ErrorDescriptor):
            result.append(loaded[0])
    return result
def _create_course_locator(self, course_info, branch):
    """
    Build a CourseLocator from a course index document and a branch name.
    """
    return CourseLocator(
        course_info['org'],
        course_info['course'],
        course_info['run'],
        branch=branch,
    )
def _create_library_locator(self, library_info, branch):
    """
    Build a LibraryLocator from a library index document and a branch name.
    """
    # Library indexes store the library identifier under the 'course' key.
    return LibraryLocator(
        org=library_info['org'],
        library=library_info['course'],
        branch=branch,
    )
@autoretry_read()
def get_courses(self, branch, **kwargs):
    """
    Returns a list of course descriptors matching any given qualifiers.

    qualifiers should be a dict of keywords matching the db fields or any
    legal query for mongo to use against the active_versions collection.

    Note, this is to find the current head of the named branch type.
    To get specific versions via guid use get_course.

    :param branch: the branch for which to return courses.
    """
    # get the blocks for each course index (s/b the root)
    return self._get_structures_for_branch_and_locator(branch, self._create_course_locator, **kwargs)
def get_libraries(self, branch="library", **kwargs):
    """
    Returns a list of "library" root blocks matching any given qualifiers.

    TODO: better way of identifying library index entry vs. course index entry.
    """
    # Same machinery as get_courses but with library locators.
    return self._get_structures_for_branch_and_locator(branch, self._create_library_locator, **kwargs)
def make_course_key(self, org, course, run):
    """
    Return a valid :class:`~opaque_keys.edx.keys.CourseKey` for this modulestore
    that matches the supplied `org`, `course`, and `run`.

    This key may represent a course that doesn't exist in this modulestore.
    """
    return CourseLocator(org=org, course=course, run=run)
def _get_structure(self, structure_id, depth, head_validation=True, **kwargs):
    """
    Load the Course or Library root block identified by ``structure_id``.
    """
    envelope = self._lookup_course(structure_id, head_validation=head_validation)
    loaded = self._load_items(envelope, [envelope.structure['root']], depth, **kwargs)
    return loaded[0]
def get_course(self, course_id, depth=0, **kwargs):
    """
    Gets the course descriptor for the course identified by the locator
    """
    wrong_key_type = not isinstance(course_id, CourseLocator) or course_id.deprecated
    if wrong_key_type:
        # Not a key this modulestore can hold, so the course cannot be here.
        raise ItemNotFoundError(course_id)
    return self._get_structure(course_id, depth, **kwargs)
def get_library(self, library_id, depth=0, head_validation=True, **kwargs):
    """
    Gets the 'library' root block for the library identified by the locator
    """
    if isinstance(library_id, LibraryLocator):
        return self._get_structure(library_id, depth, head_validation=head_validation, **kwargs)
    # Not a library key, so it cannot be stored in this modulestore.
    raise ItemNotFoundError(library_id)
def has_course(self, course_id, ignore_case=False, **kwargs):
    """
    Does this course exist in this modulestore. This method does not verify that the branch &/or
    version in the course_id exists. Use get_course_index_info to check that.

    Returns the course_id of the course if it was found, else None.

    Note: we return the course_id instead of a boolean here since the found course may have
    a different id than the given course_id when ignore_case is True.
    """
    if not isinstance(course_id, CourseLocator) or course_id.deprecated:
        # Wrong key type: cannot possibly be stored in this modulestore.
        return False
    index = self.get_course_index(course_id, ignore_case)
    if not index:
        return None
    return CourseLocator(index['org'], index['course'], index['run'], course_id.branch)
def has_library(self, library_id, ignore_case=False, **kwargs):
    """
    Does this library exist in this modulestore. This method does not verify that the branch &/or
    version in the library_id exists.

    Returns the library_id of the course if it was found, else None.
    """
    if not isinstance(library_id, LibraryLocator):
        return None
    index = self.get_course_index(library_id, ignore_case)
    # Library indexes keep the library identifier under the 'course' key.
    return LibraryLocator(index['org'], index['course'], library_id.branch) if index else None
def has_item(self, usage_key):
    """
    Returns True if usage_key exists in its course. Returns false if
    the course or the block w/in the course do not exist for the given version.

    raises InsufficientSpecificationError if the usage_key does not id a block
    """
    if not isinstance(usage_key, BlockUsageLocator) or usage_key.deprecated:
        # Wrong key type: cannot possibly be stored in this modulestore.
        return False
    if usage_key.block_id is None:
        raise InsufficientSpecificationError(usage_key)
    try:
        structure = self._lookup_course(usage_key.course_key).structure
    except ItemNotFoundError:
        # Only raised here when the course itself does not exist.
        return False
    block = self._get_block_from_structure(structure, BlockKey.from_usage_key(usage_key))
    return block is not None
@contract(returns='XBlock')
def get_item(self, usage_key, depth=0, **kwargs):
    """
    depth (int): An argument that some module stores may use to prefetch
        descendants of the queried modules for more efficient results later
        in the request. The depth is counted in the number of
        calls to get_children() to cache. None indicates to cache all
        descendants.

    raises InsufficientSpecificationError or ItemNotFoundError
    """
    if not isinstance(usage_key, BlockUsageLocator) or usage_key.deprecated:
        # Wrong key type: cannot possibly be stored in this modulestore.
        raise ItemNotFoundError(usage_key)
    with self.bulk_operations(usage_key.course_key):
        course = self._lookup_course(usage_key.course_key)
        matches = self._load_items(course, [BlockKey.from_usage_key(usage_key)], depth, **kwargs)
        if not matches:
            raise ItemNotFoundError(usage_key)
        if len(matches) > 1:
            log.debug("Found more than one item for '{}'".format(usage_key))
        return matches[0]
def get_items(self, course_locator, settings=None, content=None, qualifiers=None, **kwargs):
    """
    Returns:
        list of XModuleDescriptor instances for the matching items within the course with
        the given course_locator

    NOTE: don't use this to look for courses as the course_locator is required. Use get_courses.

    Args:
        course_locator (CourseLocator): the course identifier
        settings (dict): fields to look for which have settings scope. Follows same syntax
            and rules as qualifiers below
        content (dict): fields to look for which have content scope. Follows same syntax and
            rules as qualifiers below.
        qualifiers (dict): what to look for within the course.
            Common qualifiers are ``category`` or any field name. if the target field is a list,
            then it searches for the given value in the list not list equivalence.
            For substring matching pass a regex object.
            For split,
            you can search by ``edited_by``, ``edited_on`` providing a function testing limits.
    """
    if not isinstance(course_locator, CourseLocator) or course_locator.deprecated:
        # The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
        return []
    course = self._lookup_course(course_locator)
    items = []
    qualifiers = qualifiers.copy() if qualifiers else {}  # copy the qualifiers (destructively manipulated here)

    def _block_matches_all(block_data):
        """
        Check that the block matches all the criteria
        """
        # do the checks which don't require loading any additional data
        # (note: reads the enclosing `qualifiers`/`settings`, which are
        # adjusted below before this closure is ever called)
        if (  # pylint: disable=bad-continuation
                self._block_matches(block_data, qualifiers) and
                self._block_matches(block_data.fields, settings)
        ):
            if content:
                definition_block = self.get_definition(course_locator, block_data.definition)
                return self._block_matches(definition_block['fields'], content)
            else:
                return True

    if settings is None:
        settings = {}
    if 'name' in qualifiers:
        # odd case where we don't search just confirm
        block_name = qualifiers.pop('name')
        block_ids = []
        for block_id, block in course.structure['blocks'].iteritems():
            # compare against the id half of the BlockKey only
            if block_name == block_id.id and _block_matches_all(block):
                block_ids.append(block_id)

        return self._load_items(course, block_ids, **kwargs)

    if 'category' in qualifiers:
        # callers say 'category'; structures store it as 'block_type'
        qualifiers['block_type'] = qualifiers.pop('category')

    # don't expect caller to know that children are in fields
    if 'children' in qualifiers:
        settings['children'] = qualifiers.pop('children')
    for block_id, value in course.structure['blocks'].iteritems():
        if _block_matches_all(value):
            items.append(block_id)

    if len(items) > 0:
        return self._load_items(course, items, depth=0, **kwargs)
    else:
        return []
def get_parent_location(self, locator, **kwargs):
    """
    Return the location (Locators w/ block_ids) for the parent of this location in this
    course. Could use get_items(location, {'children': block_id}) but this is slightly faster.
    NOTE: the locator must contain the block_id, and this code does not actually ensure block_id exists

    :param locator: BlockUsageLocator restricting search scope
    """
    if not isinstance(locator, BlockUsageLocator) or locator.deprecated:
        # Wrong key type: cannot possibly be stored in this modulestore.
        raise ItemNotFoundError(locator)
    course = self._lookup_course(locator.course_key)
    parent_ids = self._get_parents_from_structure(BlockKey.from_usage_key(locator), course.structure)
    if not parent_ids:
        return None

    # Deterministic choice when multiple parents exist: alphabetically least (type, id).
    chosen = min(parent_ids, key=lambda parent: (parent.type, parent.id))
    return BlockUsageLocator.make_relative(
        locator,
        block_type=chosen.type,
        block_id=chosen.id,
    )
def get_orphans(self, course_key, **kwargs):
    """
    Return an array of all of the orphans in the course.
    """
    if not isinstance(course_key, CourseLocator) or course_key.deprecated:
        # Wrong key type: cannot possibly be stored in this modulestore.
        raise ItemNotFoundError(course_key)
    detached_categories = [name for name, __ in XBlock.load_tagged_classes("detached")]
    course = self._lookup_course(course_key)
    blocks = course.structure['blocks']

    # Start from every block except the root, then strip out everything that
    # is referenced as a child or is a detached (intentionally parentless) type.
    candidates = set(blocks.keys())
    candidates.remove(course.structure['root'])
    for block_id, block_data in blocks.iteritems():
        candidates.difference_update(
            BlockKey(*child) for child in block_data.fields.get('children', [])
        )
        if block_data.block_type in detached_categories:
            candidates.discard(block_id)
    return [
        course_key.make_usage_key(block_type=orphan.type, block_id=orphan.id)
        for orphan in candidates
    ]
def get_course_index_info(self, course_key):
    """
    The index records the initial creation of the indexed course and tracks the current version
    heads. This function is primarily for test verification but may serve some
    more general purpose.

    :param course_key: must have a org, course, and run set
    :return {'org': string,
        versions: {'draft': the head draft version id,
            'published': the head published version id if any,
        },
        'edited_by': who created the course originally (named edited for consistency),
        'edited_on': when the course was originally created
    }
    """
    if not isinstance(course_key, CourseLocator) or course_key.deprecated:
        # Wrong key type: cannot possibly be stored in this modulestore.
        raise ItemNotFoundError(course_key)
    if not (course_key.org and course_key.course and course_key.run):
        # index lookups require a fully-specified org/course/run
        return None
    return self.get_course_index(course_key)
# TODO figure out a way to make this info accessible from the course descriptor
def get_course_history_info(self, course_key):
    """
    Because xblocks doesn't give a means to separate the course structure's meta information from
    the course xblock's, this method will get that info for the structure as a whole.

    :param course_key:
    :return {'original_version': the version guid of the original version of this course,
        'previous_version': the version guid of the previous version,
        'edited_by': who made the last change,
        'edited_on': when the change was made
    }
    """
    if not isinstance(course_key, CourseLocator) or course_key.deprecated:
        # Wrong key type: cannot possibly be stored in this modulestore.
        raise ItemNotFoundError(course_key)
    structure = self._lookup_course(course_key).structure
    # Project just the structure-level metadata fields out of the document.
    return {
        field: structure[field]
        for field in ('original_version', 'previous_version', 'edited_by', 'edited_on')
    }
def get_definition_history_info(self, definition_locator):
    """
    Because xblocks doesn't give a means to separate the definition's meta information from
    the usage xblock's, this method will get that info for the definition

    :return {'original_version': the version guid of the original version of this course,
        'previous_version': the version guid of the previous version,
        'edited_by': who made the last change,
        'edited_on': when the change was made
    }
    """
    if not isinstance(definition_locator, DefinitionLocator) or definition_locator.deprecated:
        # Wrong key type: cannot possibly be stored in this modulestore.
        raise ItemNotFoundError(definition_locator)
    definition = self.db_connection.get_definition(definition_locator.definition_id)
    return definition['edit_info'] if definition is not None else None
def get_course_successors(self, course_locator, version_history_depth=1):
    """
    Find the version_history_depth next versions of this course. Return as a VersionTree
    Mostly makes sense when course_locator uses a version_guid, but because it finds all relevant
    next versions, these do include those created for other courses.

    :param course_locator:
    :param version_history_depth: how many generations of successors to include (>= 1)
    """
    if not isinstance(course_locator, CourseLocator) or course_locator.deprecated:
        # The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
        raise ItemNotFoundError(course_locator)
    if version_history_depth < 1:
        return None
    if course_locator.version_guid is None:
        # resolve the branch head into a concrete version to start from
        course = self._lookup_course(course_locator)
        version_guid = course.structure['_id']
        course_locator = course_locator.for_version(version_guid)
    else:
        version_guid = course_locator.version_guid

    # TODO if depth is significant, it may make sense to get all that have the same original_version
    # and reconstruct the subtree from version_guid
    next_entries = self.find_structures_derived_from([version_guid])
    # must only scan cursor's once
    next_versions = [struct for struct in next_entries]
    result = {version_guid: [CourseLocator(version_guid=struct['_id']) for struct in next_versions]}

    depth = 1
    while depth < version_history_depth and len(next_versions) > 0:
        depth += 1
        next_entries = self.find_structures_derived_from([struct['_id'] for struct in next_versions])
        next_versions = [struct for struct in next_entries]
        for course_structure in next_versions:
            # BUG FIX: this previously appended ``struct['_id']`` — a name leaked
            # from the comprehension above, always pointing at the *last* structure —
            # so every successor beyond depth 1 was recorded with the same wrong guid.
            result.setdefault(course_structure['previous_version'], []).append(
                CourseLocator(version_guid=course_structure['_id']))
    return VersionTree(course_locator, result)
def get_block_generations(self, block_locator):
    """
    Find the history of this block. Return as a VersionTree of each place the block changed (except
    deletion).

    The block's history tracks its explicit changes but not the changes in its children starting
    from when the block was created.
    """
    # course_agnostic means we don't care if the head and version don't align, trust the version
    course_struct = self._lookup_course(block_locator.course_key.course_agnostic()).structure
    block_key = BlockKey.from_usage_key(block_locator)
    all_versions_with_block = self.find_ancestor_structures(
        original_version=course_struct['original_version'],
        block_key=block_key
    )
    # find (all) root versions and build map {previous: {successors}..}
    possible_roots = []
    result = {}
    for version in all_versions_with_block:
        block_payload = self._get_block_from_structure(version, block_key)
        # only consider structures in which this block was actually edited
        if version['_id'] == block_payload.edit_info.update_version:
            if block_payload.edit_info.previous_version is None:
                # this was when this block was created
                possible_roots.append(block_payload.edit_info.update_version)
            else:  # map previous to {update..}
                result.setdefault(block_payload.edit_info.previous_version, set()).add(
                    block_payload.edit_info.update_version)

    # more than one possible_root means usage was added and deleted > 1x.
    if len(possible_roots) > 1:
        # find the history segment including block_locator's version
        element_to_find = self._get_block_from_structure(course_struct, block_key).edit_info.update_version
        if element_to_find in possible_roots:
            possible_roots = [element_to_find]
        for possibility in possible_roots:
            if self._find_local_root(element_to_find, possibility, result):
                possible_roots = [possibility]
                break
    elif len(possible_roots) == 0:
        return None

    # convert the results value sets to locators
    for k, versions in result.iteritems():
        result[k] = [
            block_locator.for_version(version)
            for version in versions
        ]
    return VersionTree(
        block_locator.for_version(possible_roots[0]),
        result
    )
def get_definition_successors(self, definition_locator, version_history_depth=1):
    """
    Find the version_history_depth next versions of this definition. Return as a VersionTree
    """
    # TODO implement — currently an unimplemented stub that always returns None.
    pass
def get_block_original_usage(self, usage_key):
    """
    If a block was inherited into another structure using copy_from_template,
    this will return the original block usage locator and version from
    which the copy was inherited.

    Returns usage_key, version if the data is available, otherwise returns (None, None)
    """
    blocks = self._lookup_course(usage_key.course_key).structure['blocks']
    block = blocks.get(BlockKey.from_usage_key(usage_key))
    if not block or block.edit_info.original_usage is None:
        # block missing, or it was not copied from a template
        return None, None
    original_key = BlockUsageLocator.from_string(block.edit_info.original_usage)
    return original_key, block.edit_info.original_usage_version
def create_definition_from_data(self, course_key, new_def_data, category, user_id):
    """
    Pull the definition fields out of descriptor and save to the db as a new definition
    w/o a predecessor and return the new id.

    :param user_id: request.user object
    """
    serialized_fields = self._serialize_fields(category, new_def_data)
    new_id = ObjectId()
    document = {
        '_id': new_id,
        'block_type': category,
        'fields': serialized_fields,
        'edit_info': {
            'edited_by': user_id,
            'edited_on': datetime.datetime.now(UTC),
            'previous_version': None,  # brand new: no predecessor
            'original_version': new_id,
        },
        'schema_version': self.SCHEMA_VERSION,
    }
    self.update_definition(course_key, document)
    return DefinitionLocator(category, new_id)
def update_definition_from_data(self, course_key, definition_locator, new_def_data, user_id):
    """
    See if new_def_data differs from the persisted version. If so, update
    the persisted version and return the new id.

    Returns ``(definition_locator, changed)`` where ``changed`` is True iff a
    new definition version was written.

    :param user_id: request.user
    """
    def needs_saved():
        """Return True iff new_def_data differs from the stored fields."""
        # any key added or changed relative to the persisted fields?
        for key, value in new_def_data.iteritems():
            if key not in old_definition['fields'] or value != old_definition['fields'][key]:
                return True
        # any key removed relative to the persisted fields?
        for key in old_definition.get('fields', {}):
            if key not in new_def_data:
                return True
        # FIX: previously fell off the end, implicitly returning None instead of False.
        return False

    # if this looks in cache rather than fresh fetches, then it will probably not detect
    # actual change b/c the descriptor and cache probably point to the same objects
    old_definition = self.get_definition(course_key, definition_locator.definition_id)
    if old_definition is None:
        raise ItemNotFoundError(definition_locator)

    new_def_data = self._serialize_fields(old_definition['block_type'], new_def_data)
    if needs_saved():
        definition_locator = self._update_definition_from_data(course_key, old_definition, new_def_data, user_id)
        return definition_locator, True
    else:
        return definition_locator, False
def _update_definition_from_data(self, course_key, old_definition, new_def_data, user_id):
    """
    Update the persisted version of the given definition and return the
    locator of the new definition. Does not check if data differs from the
    previous version.
    """
    new_definition = copy.deepcopy(old_definition)
    edit_info = new_definition['edit_info']

    new_definition['_id'] = ObjectId()
    new_definition['fields'] = new_def_data
    new_definition['schema_version'] = self.SCHEMA_VERSION
    edit_info['edited_by'] = user_id
    edit_info['edited_on'] = datetime.datetime.now(UTC)
    # chain this version back to the one it was derived from
    edit_info['previous_version'] = old_definition['_id']

    self.update_definition(course_key, new_definition)
    return DefinitionLocator(new_definition['block_type'], new_definition['_id'])
def _generate_block_key(self, course_blocks, category):
    """
    Generate a somewhat readable block id unique w/in this course using the category

    :param course_blocks: the current list of blocks.
    :param category:
    """
    # NOTE: a potential bug is that a block is deleted and another created which gets the old
    # block's id. a possible fix is to cache the last serial in a dict in the structure
    # {category: last_serial...}
    # A potential confusion is if the name incorporates the parent's name, then if the child
    # moves, its id won't change and will be confusing
    serial = 0
    while True:
        serial += 1
        candidate = BlockKey(category, '{}{}'.format(category, serial))
        if candidate not in course_blocks:
            return candidate
@contract(returns='XBlock')
def create_item(
    self, user_id, course_key, block_type, block_id=None,
    definition_locator=None, fields=None,
    force=False, **kwargs
):
    """
    Add a descriptor to persistence as an element
    of the course. Return the resulting post saved version with populated locators.

    :param course_key: If it has a version_guid and a course org + course + run + branch, this
        method ensures that the version is the head of the given course branch before making the change.

    raises InsufficientSpecificationError if there is no course locator.
    raises VersionConflictError if the version_guid of the course_or_parent_locator is not the head
        of the its course unless force is true.
    :param force: fork the structure and don't update the course draftVersion if the above
    :param continue_revision: for multistep transactions, continue revising the given version rather than creating
        a new version. Setting force to True conflicts with setting this to True and will cause a VersionConflictError

    :param definition_locator: should either be None to indicate this is a brand new definition or
        a pointer to the existing definition to which this block should point or from which this was derived
        or a LocalId to indicate that it's new.
        If fields does not contain any Scope.content, then definition_locator must have a value meaning that this
        block points
        to the existing definition. If fields contains Scope.content and definition_locator is not None, then
        the Scope.content fields are assumed to be a new payload for definition_locator.

    :param block_id: if provided, must not already exist in the structure. Provides the block id for the
        new item in this structure. Otherwise, one is computed using the category appended w/ a few digits.

    This method creates a new version of the course structure unless the course has a bulk_write operation
    active.
    It creates and inserts the new block, makes the block point
    to the definition which may be new or a new version of an existing or an existing.

    Rules for course locator:

    * If the course locator specifies a org and course and run and either it doesn't
      specify version_guid or the one it specifies == the current head of the branch,
      it progresses the course to point
      to the new head and sets the active version to point to the new head
    * If the locator has a org and course and run but its version_guid != current head, it raises VersionConflictError.

    NOTE: using a version_guid will end up creating a new version of the course. Your new item won't be in
    the course id'd by version_guid but instead in one w/ a new version_guid. Ensure in this case that you get
    the new version_guid from the locator in the returned object!
    """
    with self.bulk_operations(course_key):
        # split handles all the fields in one dict not separated by scope
        fields = fields or {}
        fields.update(kwargs.pop('metadata', {}) or {})
        definition_data = kwargs.pop('definition_data', {})
        if definition_data:
            if not isinstance(definition_data, dict):
                definition_data = {'data': definition_data}  # backward compatibility to mongo's hack
            fields.update(definition_data)

        # find course_index entry if applicable and structures entry
        index_entry = self._get_index_if_valid(course_key, force)
        structure = self._lookup_course(course_key).structure

        partitioned_fields = self.partition_fields_by_scope(block_type, fields)
        new_def_data = partitioned_fields.get(Scope.content, {})
        # persist the definition if persisted != passed
        if (definition_locator is None or isinstance(definition_locator.definition_id, LocalId)):
            definition_locator = self.create_definition_from_data(course_key, new_def_data, block_type, user_id)
        elif new_def_data:
            definition_locator, _ = self.update_definition_from_data(course_key, definition_locator, new_def_data, user_id)

        # copy the structure and modify the new one
        new_structure = self.version_structure(course_key, structure, user_id)

        new_id = new_structure['_id']

        # generate usage id
        if block_id is not None:
            # caller-supplied id: reject collisions with existing blocks
            block_key = BlockKey(block_type, block_id)
            if block_key in new_structure['blocks']:
                raise DuplicateItemError(block_id, self, 'structures')
        else:
            block_key = self._generate_block_key(new_structure['blocks'], block_type)

        # settings (and children, if any) live on the block itself, not the definition
        block_fields = partitioned_fields.get(Scope.settings, {})
        if Scope.children in partitioned_fields:
            block_fields.update(partitioned_fields[Scope.children])
        self._update_block_in_structure(new_structure, block_key, self._new_block(
            user_id,
            block_type,
            block_fields,
            definition_locator.definition_id,
            new_id,
        ))

        self.update_structure(course_key, new_structure)

        # update the index entry if appropriate
        if index_entry is not None:
            # see if any search targets changed
            if fields is not None:
                self._update_search_targets(index_entry, fields)
            self._update_head(course_key, index_entry, course_key.branch, new_id)
            item_loc = BlockUsageLocator(
                course_key.version_agnostic(),
                block_type=block_type,
                block_id=block_key.id,
            )
        else:
            # no index to advance: the caller gets a locator pinned to the new version
            item_loc = BlockUsageLocator(
                CourseLocator(version_guid=new_id),
                block_type=block_type,
                block_id=block_key.id,
            )

        if isinstance(course_key, LibraryLocator):
            self._flag_library_updated_event(course_key)

        # reconstruct the new_item from the cache
        return self.get_item(item_loc)
    def create_child(self, user_id, parent_usage_key, block_type, block_id=None, fields=None, **kwargs):
        """
        Creates and saves a new xblock as a child of the specified block.
        Returns the newly created item.
        Args:
            user_id: ID of the user creating and saving the xmodule
            parent_usage_key: a :class:`~opaque_key.edx.UsageKey` identifying the
                block that this item should be parented under
            block_type: The type of block to create
            block_id: a unique identifier for the new item. If not supplied,
                a new identifier will be generated
            fields (dict): A dictionary specifying initial values for some or all fields
                in the newly created block
            kwargs: may include 'position' (int) to insert the child at a specific index
                in the parent's children list; appended to the end otherwise.
        Raises:
            ItemNotFoundError: if the parent block does not exist in the course structure.
        """
        with self.bulk_operations(parent_usage_key.course_key):
            # create the block itself first; parent linkage is patched in afterwards
            xblock = self.create_item(
                user_id, parent_usage_key.course_key, block_type, block_id=block_id, fields=fields,
                **kwargs)
            # skip attach to parent if xblock has 'detached' tag
            if 'detached' in xblock._class_tags: # pylint: disable=protected-access
                return xblock
            # don't version the structure as create_item handled that already.
            new_structure = self._lookup_course(xblock.location.course_key).structure
            # add new block as child and update parent's version
            block_id = BlockKey.from_usage_key(parent_usage_key)
            if block_id not in new_structure['blocks']:
                raise ItemNotFoundError(parent_usage_key)
            parent = new_structure['blocks'][block_id]
            # Originally added to support entrance exams (settings.FEATURES.get('ENTRANCE_EXAMS'))
            if kwargs.get('position') is None:
                parent.fields.setdefault('children', []).append(BlockKey.from_usage_key(xblock.location))
            else:
                parent.fields.setdefault('children', []).insert(
                    kwargs.get('position'),
                    BlockKey.from_usage_key(xblock.location)
                )
            if parent.edit_info.update_version != new_structure['_id']:
                # if the parent hadn't been previously changed in this bulk transaction, indicate that it's
                # part of the bulk transaction
                self.version_block(parent, user_id, new_structure['_id'])
            self.decache_block(parent_usage_key.course_key, new_structure['_id'], block_id)
            # db update
            self.update_structure(parent_usage_key.course_key, new_structure)
            # don't need to update the index b/c create_item did it for this version
            return xblock
def clone_course(self, source_course_id, dest_course_id, user_id, fields=None, **kwargs):
"""
See :meth: `.ModuleStoreWrite.clone_course` for documentation.
In split, other than copying the assets, this is cheap as it merely creates a new version of the
existing course.
"""
source_index = self.get_course_index_info(source_course_id)
if source_index is None:
raise ItemNotFoundError("Cannot find a course at {0}. Aborting".format(source_course_id))
with self.bulk_operations(dest_course_id):
new_course = self.create_course(
dest_course_id.org, dest_course_id.course, dest_course_id.run,
user_id,
fields=fields,
versions_dict=source_index['versions'],
search_targets=source_index['search_targets'],
skip_auto_publish=True,
**kwargs
)
# don't copy assets until we create the course in case something's awry
super(SplitMongoModuleStore, self).clone_course(source_course_id, dest_course_id, user_id, fields, **kwargs)
return new_course
DEFAULT_ROOT_COURSE_BLOCK_ID = 'course'
DEFAULT_ROOT_LIBRARY_BLOCK_ID = 'library'
def create_course(
self, org, course, run, user_id, master_branch=None, fields=None,
versions_dict=None, search_targets=None, root_category='course',
root_block_id=None, **kwargs
):
"""
Create a new entry in the active courses index which points to an existing or new structure. Returns
the course root of the resulting entry (the location has the course id)
Arguments:
org (str): the organization that owns the course
course (str): the course number of the course
run (str): the particular run of the course (e.g. 2013_T1)
user_id: id of the user creating the course
fields (dict): Fields to set on the course at initialization
kwargs: Any optional arguments understood by a subset of modulestores to customize instantiation
course + run: If there are duplicates, this method will raise DuplicateCourseError
fields: if scope.settings fields provided, will set the fields of the root course object in the
new course. If both
settings fields and a starting version are provided (via versions_dict), it will generate a successor version
to the given version,
and update the settings fields with any provided values (via update not setting).
fields (content): if scope.content fields provided, will update the fields of the new course
xblock definition to this. Like settings fields,
if provided, this will cause a new version of any given version as well as a new version of the
definition (which will point to the existing one if given a version). If not provided and given
a version_dict, it will reuse the same definition as that version's course
(obvious since it's reusing the
course). If not provided and no version_dict is given, it will be empty and get the field defaults
when
loaded.
master_branch: the tag (key) for the version name in the dict which is the DRAFT version. Not the actual
version guid, but what to call it.
search_targets: a dict of search key and value. For example, wiki_slug. Add any fields whose edits
should change the search targets to SplitMongoModuleStore.SEARCH_TARGET dict
versions_dict: the starting version ids where the keys are the tags such as DRAFT and PUBLISHED
and the values are structure guids. If provided, the new course will reuse this version (unless you also
provide any fields overrides, see above). if not provided, will create a mostly empty course
structure with just a category course root xblock.
"""
# either need to assert this or have a default
assert master_branch is not None
# check course and run's uniqueness
locator = CourseLocator(org=org, course=course, run=run, branch=master_branch)
return self._create_courselike(
locator, user_id, master_branch, fields, versions_dict,
search_targets, root_category, root_block_id, **kwargs
)
    def _create_courselike(
        self, locator, user_id, master_branch, fields=None,
        versions_dict=None, search_targets=None, root_category='course',
        root_block_id=None, **kwargs
    ):
        """
        Internal code for creating a course or library.

        Shared by create_course() and create_library(); see create_course() for the
        meaning of the arguments. ``locator`` is a CourseLocator or LibraryLocator
        identifying the new courselike. Raises DuplicateCourseError if an index
        already exists for ``locator``.
        """
        index = self.get_course_index(locator)
        if index is not None:
            raise DuplicateCourseError(locator, index)
        partitioned_fields = self.partition_fields_by_scope(root_category, fields)
        block_fields = partitioned_fields[Scope.settings]
        if Scope.children in partitioned_fields:
            block_fields.update(partitioned_fields[Scope.children])
        definition_fields = self._serialize_fields(root_category, partitioned_fields.get(Scope.content, {}))
        # build from inside out: definition, structure, index entry
        # if building a wholly new structure
        if versions_dict is None or master_branch not in versions_dict:
            # create new definition and structure
            definition_id = self.create_definition_from_data(locator, definition_fields, root_category, user_id).definition_id
            draft_structure = self._new_structure(
                user_id,
                BlockKey(
                    root_category,
                    root_block_id or SplitMongoModuleStore.DEFAULT_ROOT_COURSE_BLOCK_ID,
                ),
                block_fields,
                definition_id
            )
            new_id = draft_structure['_id']
            if versions_dict is None:
                versions_dict = {master_branch: new_id}
            else:
                versions_dict[master_branch] = new_id
        elif block_fields or definition_fields: # pointing to existing course w/ some overrides
            # just get the draft_version structure
            draft_version = CourseLocator(version_guid=versions_dict[master_branch])
            draft_structure = self._lookup_course(draft_version).structure
            # fork the existing structure so the overrides land on a successor version
            draft_structure = self.version_structure(locator, draft_structure, user_id)
            new_id = draft_structure['_id']
            root_block = draft_structure['blocks'][draft_structure['root']]
            if block_fields is not None:
                root_block.fields.update(self._serialize_fields(root_category, block_fields))
            if definition_fields is not None:
                # content overrides fork the definition too, then stamp the root's edit info
                old_def = self.get_definition(locator, root_block.definition)
                new_fields = old_def['fields']
                new_fields.update(definition_fields)
                definition_id = self._update_definition_from_data(locator, old_def, new_fields, user_id).definition_id
                root_block.definition = definition_id
                root_block.edit_info.edited_on = datetime.datetime.now(UTC)
                root_block.edit_info.edited_by = user_id
                root_block.edit_info.previous_version = root_block.edit_info.update_version
                root_block.edit_info.update_version = new_id
            versions_dict[master_branch] = new_id
        else: # Pointing to an existing course structure
            new_id = versions_dict[master_branch]
            draft_version = CourseLocator(version_guid=new_id)
            draft_structure = self._lookup_course(draft_version).structure
        locator = locator.replace(version_guid=new_id)
        with self.bulk_operations(locator):
            self.update_structure(locator, draft_structure)
            index_entry = {
                '_id': ObjectId(),
                'org': locator.org,
                'course': locator.course,
                'run': locator.run,
                'edited_by': user_id,
                'edited_on': datetime.datetime.now(UTC),
                'versions': versions_dict,
                'schema_version': self.SCHEMA_VERSION,
                'search_targets': search_targets or {},
            }
            if fields is not None:
                self._update_search_targets(index_entry, fields)
            self.insert_course_index(locator, index_entry)
            # expensive hack to persist default field values set in __init__ method (e.g., wiki_slug)
            if isinstance(locator, LibraryLocator):
                course = self.get_library(locator, **kwargs)
            else:
                course = self.get_course(locator, **kwargs)
            return self.update_item(course, user_id, **kwargs)
def create_library(self, org, library, user_id, fields, **kwargs):
"""
Create a new library. Arguments are similar to create_course().
"""
kwargs["fields"] = fields
kwargs["master_branch"] = kwargs.get("master_branch", ModuleStoreEnum.BranchName.library)
kwargs["root_category"] = kwargs.get("root_category", "library")
kwargs["root_block_id"] = kwargs.get("root_block_id", "library")
locator = LibraryLocator(org=org, library=library, branch=kwargs["master_branch"])
return self._create_courselike(locator, user_id, **kwargs)
def update_item(self, descriptor, user_id, allow_not_found=False, force=False, **kwargs):
"""
Save the descriptor's fields. it doesn't descend the course dag to save the children.
Return the new descriptor (updated location).
raises ItemNotFoundError if the location does not exist.
Creates a new course version. If the descriptor's location has a org and course and run, it moves the course head
pointer. If the version_guid of the descriptor points to a non-head version and there's been an intervening
change to this item, it raises a VersionConflictError unless force is True. In the force case, it forks
the course but leaves the head pointer where it is (this change will not be in the course head).
The implementation tries to detect which, if any changes, actually need to be saved and thus won't version
the definition, structure, nor course if they didn't change.
"""
partitioned_fields = self.partition_xblock_fields_by_scope(descriptor)
return self._update_item_from_fields(
user_id, descriptor.location.course_key, BlockKey.from_usage_key(descriptor.location),
partitioned_fields, descriptor.definition_locator, allow_not_found, force, **kwargs
) or descriptor
    def _update_item_from_fields(
        self, user_id, course_key, block_key, partitioned_fields,
        definition_locator, allow_not_found, force, **kwargs
    ):
        """
        Broke out guts of update_item for short-circuited internal use only.

        Returns the refreshed item when anything changed, or None when the stored
        block already matched ``partitioned_fields`` (callers fall back to their
        own descriptor in that case).
        """
        with self.bulk_operations(course_key):
            # a LocalId/None block id means the block was never persisted: create instead
            if allow_not_found and isinstance(block_key.id, (LocalId, NoneType)):
                fields = {}
                for subfields in partitioned_fields.itervalues():
                    fields.update(subfields)
                return self.create_item(
                    user_id, course_key, block_key.type, fields=fields, force=force
                )
            original_structure = self._lookup_course(course_key).structure
            index_entry = self._get_index_if_valid(course_key, force)
            original_entry = self._get_block_from_structure(original_structure, block_key)
            if original_entry is None:
                if allow_not_found:
                    # block id was supplied but not found: create it with that id
                    fields = {}
                    for subfields in partitioned_fields.itervalues():
                        fields.update(subfields)
                    return self.create_item(
                        user_id, course_key, block_key.type, block_id=block_key.id, fields=fields, force=force,
                    )
                else:
                    raise ItemNotFoundError(course_key.make_usage_key(block_key.type, block_key.id))
            is_updated = False
            definition_fields = partitioned_fields[Scope.content]
            if definition_locator is None:
                definition_locator = DefinitionLocator(original_entry.block_type, original_entry.definition)
            if definition_fields:
                definition_locator, is_updated = self.update_definition_from_data(
                    course_key, definition_locator, definition_fields, user_id
                )
            # check metadata
            settings = partitioned_fields[Scope.settings]
            settings = self._serialize_fields(block_key.type, settings)
            if not is_updated:
                is_updated = self._compare_settings(settings, original_entry.fields)
            # check children
            if partitioned_fields.get(Scope.children, {}): # purposely not 'is not None'
                serialized_children = [BlockKey.from_usage_key(child) for child in partitioned_fields[Scope.children]['children']]
                is_updated = is_updated or original_entry.fields.get('children', []) != serialized_children
                if is_updated:
                    settings['children'] = serialized_children
            # if updated, rev the structure
            if is_updated:
                new_structure = self.version_structure(course_key, original_structure, user_id)
                block_data = self._get_block_from_structure(new_structure, block_key)
                block_data.definition = definition_locator.definition_id
                block_data.fields = settings
                new_id = new_structure['_id']
                self.version_block(block_data, user_id, new_id)
                self.update_structure(course_key, new_structure)
                # update the index entry if appropriate
                if index_entry is not None:
                    self._update_search_targets(index_entry, definition_fields)
                    self._update_search_targets(index_entry, settings)
                    # rebuild the course key from the index so it carries the new version guid
                    if isinstance(course_key, LibraryLocator):
                        course_key = LibraryLocator(
                            org=index_entry['org'],
                            library=index_entry['course'],
                            branch=course_key.branch,
                            version_guid=new_id
                        )
                    else:
                        course_key = CourseLocator(
                            org=index_entry['org'],
                            course=index_entry['course'],
                            run=index_entry['run'],
                            branch=course_key.branch,
                            version_guid=new_id
                        )
                    self._update_head(course_key, index_entry, course_key.branch, new_id)
                elif isinstance(course_key, LibraryLocator):
                    course_key = LibraryLocator(version_guid=new_id)
                else:
                    course_key = CourseLocator(version_guid=new_id)
                if isinstance(course_key, LibraryLocator):
                    self._flag_library_updated_event(course_key)
                # fetch and return the new item--fetching is unnecessary but a good qc step
                new_locator = course_key.make_usage_key(block_key.type, block_key.id)
                return self.get_item(new_locator, **kwargs)
            else:
                return None
    # pylint: disable=unused-argument
    def create_xblock(
        self, runtime, course_key, block_type, block_id=None, fields=None,
        definition_id=None, parent_xblock=None, **kwargs
    ):
        """
        This method instantiates the correct subclass of XModuleDescriptor based
        on the contents of json_data. It does not persist it and can create one which
        has no usage id.
        parent_xblock is used to compute inherited metadata as well as to append the new xblock.
        json_data:
        - 'block_type': the xmodule block_type
        - 'fields': a dict of locally set fields (not inherited) in json format not pythonic typed format!
        - 'definition': the object id of the existing definition
        """
        assert runtime is not None
        xblock_class = runtime.load_block_type(block_type)
        json_data = {
            'block_type': block_type,
            'fields': {},
        }
        if definition_id is not None:
            json_data['definition'] = definition_id
        if parent_xblock is None:
            # If no parent, then nothing to inherit.
            inherited_settings = {}
        else:
            inherited_settings = parent_xblock.xblock_kvs.inherited_settings.copy()
            # explicit values in `fields` win over values inherited from the parent
            if fields is not None:
                for field_name in inheritance.InheritanceMixin.fields:
                    if field_name in fields:
                        inherited_settings[field_name] = fields[field_name]
        new_block = runtime.xblock_from_json(
            xblock_class,
            course_key,
            BlockKey(block_type, block_id) if block_id else None,
            BlockData(**json_data),
            **kwargs
        )
        # apply the caller-supplied field values onto the instantiated block
        for field_name, value in (fields or {}).iteritems():
            setattr(new_block, field_name, value)
        if parent_xblock is not None:
            parent_xblock.children.append(new_block.scope_ids.usage_id)
            # decache pending children field settings
            parent_xblock.save()
        return new_block
    def persist_xblock_dag(self, xblock, user_id, force=False):
        """
        create or update the xblock and all of its children. The xblock's location must specify a course.
        If it doesn't specify a usage_id, then it's presumed to be new and need creation. This function
        descends the children performing the same operation for any that are xblocks. Any children which
        are block_ids just update the children pointer.
        All updates go into the same course version (bulk updater).
        Updates the objects which came in w/ updated location and definition_location info.
        returns the post-persisted version of the incoming xblock. Note that its children will be ids not
        objects.
        :param xblock: the head of the dag
        :param user_id: who's doing the change
        :param force: if True, fork the course rather than raising VersionConflictError
            when the course head has moved (see _get_index_if_valid)
        """
        # find course_index entry if applicable and structures entry
        course_key = xblock.location.course_key
        with self.bulk_operations(course_key):
            index_entry = self._get_index_if_valid(course_key, force)
            structure = self._lookup_course(course_key).structure
            new_structure = self.version_structure(course_key, structure, user_id)
            new_id = new_structure['_id']
            # recursively persist the dag; mutates new_structure['blocks'] in place
            is_updated = self._persist_subdag(course_key, xblock, user_id, new_structure['blocks'], new_id)
            if is_updated:
                self.update_structure(course_key, new_structure)
                # update the index entry if appropriate
                if index_entry is not None:
                    self._update_head(course_key, index_entry, xblock.location.branch, new_id)
                # fetch and return the new item--fetching is unnecessary but a good qc step
                return self.get_item(xblock.location.for_version(new_id))
            else:
                # nothing changed; the forked structure is simply never written
                return xblock
    def _persist_subdag(self, course_key, xblock, user_id, structure_blocks, new_id):
        """
        Recursive helper for persist_xblock_dag: persist ``xblock``'s definition and
        fields, descend into any in-memory (LocalId) children, and record the block
        into ``structure_blocks`` (mutated in place) under structure version ``new_id``.

        Returns True if anything (definition, fields, or children) actually changed.
        """
        # persist the definition if persisted != passed
        partitioned_fields = self.partition_xblock_fields_by_scope(xblock)
        new_def_data = self._serialize_fields(xblock.category, partitioned_fields[Scope.content])
        is_updated = False
        if xblock.definition_locator is None or isinstance(xblock.definition_locator.definition_id, LocalId):
            xblock.definition_locator = self.create_definition_from_data(
                course_key, new_def_data, xblock.category, user_id
            )
            is_updated = True
        elif new_def_data:
            xblock.definition_locator, is_updated = self.update_definition_from_data(
                course_key, xblock.definition_locator, new_def_data, user_id
            )
        if isinstance(xblock.scope_ids.usage_id.block_id, LocalId):
            # generate an id
            is_new = True
            is_updated = True
            block_id = getattr(xblock.scope_ids.usage_id.block_id, 'block_id', None)
            if block_id is None:
                block_key = self._generate_block_key(structure_blocks, xblock.scope_ids.block_type)
            else:
                block_key = BlockKey(xblock.scope_ids.block_type, block_id)
            # rewrite the block's scope ids so it carries its newly assigned block id
            new_usage_id = xblock.scope_ids.usage_id.replace(block_id=block_key.id)
            xblock.scope_ids = xblock.scope_ids._replace(usage_id=new_usage_id) # pylint: disable=protected-access
        else:
            is_new = False
            block_key = BlockKey(xblock.scope_ids.block_type, xblock.scope_ids.usage_id.block_id)
        children = []
        if xblock.has_children:
            for child in xblock.children:
                if isinstance(child.block_id, LocalId):
                    # in-memory child: persist its subtree first so it gets a real id
                    child_block = xblock.system.get_block(child)
                    is_updated = self._persist_subdag(course_key, child_block, user_id, structure_blocks, new_id) or is_updated
                    children.append(BlockKey.from_usage_key(child_block.location))
                else:
                    children.append(BlockKey.from_usage_key(child))
            is_updated = is_updated or structure_blocks[block_key].fields['children'] != children
        block_fields = partitioned_fields[Scope.settings]
        block_fields = self._serialize_fields(xblock.category, block_fields)
        if not is_new and not is_updated:
            is_updated = self._compare_settings(block_fields, structure_blocks[block_key].fields)
        if children:
            block_fields['children'] = children
        if is_updated:
            if is_new:
                block_info = self._new_block(
                    user_id,
                    xblock.category,
                    block_fields,
                    xblock.definition_locator.definition_id,
                    new_id,
                    raw=True
                )
            else:
                block_info = structure_blocks[block_key]
                block_info.fields = block_fields
                block_info.definition = xblock.definition_locator.definition_id
            self.version_block(block_info, user_id, new_id)
            structure_blocks[block_key] = block_info
        return is_updated
def _compare_settings(self, settings, original_fields):
"""
Return True if the settings are not == to the original fields
:param settings:
:param original_fields:
"""
original_keys = original_fields.keys()
if 'children' in original_keys:
original_keys.remove('children')
if len(settings) != len(original_keys):
return True
else:
new_keys = settings.keys()
for key in original_keys:
if key not in new_keys or original_fields[key] != settings[key]:
return True
    def copy(self, user_id, source_course, destination_course, subtree_list=None, blacklist=None):
        """
        Copies each xblock in subtree_list and those blocks descendants excluding blacklist
        from source_course to destination_course.
        To delete a block in the destination_course, copy its parent and blacklist the other
        sibs to keep them from being copies. You can also just call delete_item on the destination.
        Ensures that each subtree occurs in the same place in destination as it does in source. If any
        of the source's subtree parents are missing from destination, it raises ItemNotFound([parent_ids]).
        To determine the same relative order vis-a-vis published siblings,
        publishing may involve changing the order of previously published siblings. For example,
        if publishing `[c, d]` and source parent has children `[a, b, c, d, e]` and destination parent
        currently has children `[e, b]`, there's no obviously correct resulting order; thus, publish will
        reorder destination to `[b, c, d, e]` to make it conform with the source.
        :param user_id: who's doing the copy
        :param source_course: a CourseLocator (can be a version or course w/ branch)
        :param destination_course: a CourseLocator which must be an existing course but branch doesn't have
        to exist yet. (The course must exist b/c Locator doesn't have everything necessary to create it).
        Note, if the branch doesn't exist, then the source_course structure's root must be in subtree_list;
        otherwise, the publish will violate the parents must exist rule.
        :param subtree_list: a list of usage keys whose subtrees to publish.
        :param blacklist: a list of usage keys to not change in the destination, or the EXCLUDE_ALL
        sentinel: i.e., don't add if not there, don't update if there.
        Raises:
            ItemNotFoundError: if it cannot find the course. if the request is to publish a
                subtree but the ancestors up to and including the course root are not published.
        """
        # get the destination's index, and source and destination structures.
        with self.bulk_operations(source_course):
            source_structure = self._lookup_course(source_course).structure
        with self.bulk_operations(destination_course):
            index_entry = self.get_course_index(destination_course)
            if index_entry is None:
                # brand new course
                raise ItemNotFoundError(destination_course)
            if destination_course.branch not in index_entry['versions']:
                # must be copying the dag root if there's no current dag
                root_block_key = source_structure['root']
                if not any(root_block_key == BlockKey.from_usage_key(subtree) for subtree in subtree_list):
                    raise ItemNotFoundError(u'Must publish course root {}'.format(root_block_key))
                root_source = source_structure['blocks'][root_block_key]
                # create branch
                destination_structure = self._new_structure(
                    user_id, root_block_key,
                    # leave off the fields b/c the children must be filtered
                    definition_id=root_source.definition,
                )
            else:
                destination_structure = self._lookup_course(destination_course).structure
                destination_structure = self.version_structure(destination_course, destination_structure, user_id)
            if blacklist != EXCLUDE_ALL:
                # normalize the blacklist of usage keys into BlockKeys for comparison
                blacklist = [BlockKey.from_usage_key(shunned) for shunned in blacklist or []]
            # iterate over subtree list filtering out blacklist.
            orphans = set()
            destination_blocks = destination_structure['blocks']
            for subtree_root in subtree_list:
                if BlockKey.from_usage_key(subtree_root) != source_structure['root']:
                    # find the parents and put root in the right sequence
                    parents = self._get_parents_from_structure(BlockKey.from_usage_key(subtree_root), source_structure)
                    parent_found = False
                    for parent in parents:
                        # If a parent isn't found in the destination_blocks, it's possible it was renamed
                        # in the course export. Continue and only throw an exception if *no* parents are found.
                        if parent in destination_blocks:
                            parent_found = True
                            orphans.update(
                                self._sync_children(
                                    source_structure['blocks'][parent],
                                    destination_blocks[parent],
                                    BlockKey.from_usage_key(subtree_root)
                                )
                            )
                    if len(parents) and not parent_found:
                        raise ItemNotFoundError(parents)
                # update/create the subtree and its children in destination (skipping blacklist)
                orphans.update(
                    self._copy_subdag(
                        user_id, destination_structure['_id'],
                        BlockKey.from_usage_key(subtree_root),
                        source_structure['blocks'],
                        destination_blocks,
                        blacklist
                    )
                )
            # remove any remaining orphans
            for orphan in orphans:
                # orphans will include moved as well as deleted xblocks. Only delete the deleted ones.
                self._delete_if_true_orphan(orphan, destination_structure)
            # update the db
            self.update_structure(destination_course, destination_structure)
            self._update_head(destination_course, index_entry, destination_course.branch, destination_structure['_id'])
    @contract(source_keys="list(BlockUsageLocator)", dest_usage=BlockUsageLocator)
    def copy_from_template(self, source_keys, dest_usage, user_id, head_validation=True):
        """
        Flexible mechanism for inheriting content from an external course/library/etc.
        Will copy all of the XBlocks whose keys are passed as `source_course` so that they become
        children of the XBlock whose key is `dest_usage`. Any previously existing children of
        `dest_usage` that haven't been replaced/updated by this copy_from_template operation will
        be deleted.
        Unlike `copy()`, this does not care whether the resulting blocks are positioned similarly
        in their new course/library. However, the resulting blocks will be in the same relative
        order as `source_keys`.
        If any of the blocks specified already exist as children of the destination block, they
        will be updated rather than duplicated or replaced. If they have Scope.settings field values
        overriding inherited default values, those overrides will be preserved.
        IMPORTANT: This method does not preserve block_id - in other words, every block that is
        copied will be assigned a new block_id. This is because we assume that the same source block
        may be copied into one course in multiple places. However, it *is* guaranteed that every
        time this method is called for the same source block and dest_usage, the same resulting
        block id will be generated.
        :param source_keys: a list of BlockUsageLocators. Order is preserved.
        :param dest_usage: The BlockUsageLocator that will become the parent of an inherited copy
            of all the xblocks passed in `source_keys`.
        :param user_id: The user who will get credit for making this change.
        :param head_validation: passed through to _lookup_course when loading source structures.
        """
        # Preload the block structures for all source courses/libraries/etc.
        # so that we can access descendant information quickly
        source_structures = {}
        for key in source_keys:
            course_key = key.course_key
            if course_key.branch is None:
                raise ItemNotFoundError("branch is required for all source keys when using copy_from_template")
            if course_key not in source_structures:
                with self.bulk_operations(course_key):
                    source_structures[course_key] = self._lookup_course(
                        course_key, head_validation=head_validation
                    ).structure
        destination_course = dest_usage.course_key
        with self.bulk_operations(destination_course):
            index_entry = self.get_course_index(destination_course)
            if index_entry is None:
                raise ItemNotFoundError(destination_course)
            dest_structure = self._lookup_course(destination_course).structure
            old_dest_structure_version = dest_structure['_id']
            dest_structure = self.version_structure(destination_course, dest_structure, user_id)
            # Set of all descendent block IDs of dest_usage that are to be replaced:
            block_key = BlockKey(dest_usage.block_type, dest_usage.block_id)
            orig_descendants = set(self.descendants(dest_structure['blocks'], block_key, depth=None, descendent_map={}))
            # The descendants() method used above adds the block itself, which we don't consider a descendant.
            orig_descendants.remove(block_key)
            new_descendants = self._copy_from_template(
                source_structures, source_keys, dest_structure, block_key, user_id, head_validation
            )
            # Update the destination block's edit_info:
            dest_info = dest_structure['blocks'][block_key]
            dest_info.edit_info.previous_version = dest_info.edit_info.update_version
            dest_info.edit_info.update_version = old_dest_structure_version
            dest_info.edit_info.edited_by = user_id
            dest_info.edit_info.edited_on = datetime.datetime.now(UTC)
            # Any original descendant not re-created by the copy is now orphaned: delete it.
            orphans = orig_descendants - new_descendants
            for orphan in orphans:
                del dest_structure['blocks'][orphan]
            self.update_structure(destination_course, dest_structure)
            self._update_head(destination_course, index_entry, destination_course.branch, dest_structure['_id'])
        # Return usage locators for all the new children:
        return [
            destination_course.make_usage_key(*k)
            for k in dest_structure['blocks'][block_key].fields['children']
        ]
    def _copy_from_template(
        self, source_structures, source_keys, dest_structure, new_parent_block_key, user_id, head_validation
    ):
        """
        Internal recursive implementation of copy_from_template().
        Copies each block named in ``source_keys`` (and, recursively, its children)
        into ``dest_structure`` as children of ``new_parent_block_key``.
        Returns the new set of BlockKeys that are the new descendants of the block
        with key ``new_parent_block_key``.
        """
        # pylint: disable=no-member
        # ^-- Until pylint gets namedtuple support, it will give warnings about BlockKey attributes
        new_blocks = set()
        new_children = list() # ordered list of the new children of new_parent_block_key
        for usage_key in source_keys:
            src_course_key = usage_key.course_key
            hashable_source_id = src_course_key.for_version(None)
            block_key = BlockKey(usage_key.block_type, usage_key.block_id)
            source_structure = source_structures[src_course_key]
            if block_key not in source_structure['blocks']:
                raise ItemNotFoundError(usage_key)
            source_block_info = source_structure['blocks'][block_key]
            # Compute a new block ID. This new block ID must be consistent when this
            # method is called with the same (source_key, dest_structure) pair
            unique_data = "{}:{}:{}".format(
                unicode(hashable_source_id).encode("utf-8"),
                block_key.id,
                new_parent_block_key.id,
            )
            new_block_id = hashlib.sha1(unique_data).hexdigest()[:20]
            new_block_key = BlockKey(block_key.type, new_block_id)
            # Now clone block_key to new_block_key:
            new_block_info = copy.deepcopy(source_block_info)
            # Note that new_block_info now points to the same definition ID entry as source_block_info did
            existing_block_info = dest_structure['blocks'].get(new_block_key, BlockData())
            # Inherit the Scope.settings values from 'fields' to 'defaults'
            new_block_info.defaults = new_block_info.fields
            # <workaround>
            # CAPA modules store their 'markdown' value (an alternate representation of their content)
            # in Scope.settings rather than Scope.content :-/
            # markdown is a field that really should not be overridable - it fundamentally changes the content.
            # capa modules also use a custom editor that always saves their markdown field to the metadata,
            # even if it hasn't changed, which breaks our override system.
            # So until capa modules are fixed, we special-case them and remove their markdown fields,
            # forcing the inherited version to use XML only.
            if usage_key.block_type == 'problem' and 'markdown' in new_block_info.defaults:
                del new_block_info.defaults['markdown']
            # </workaround>
            new_block_info.fields = existing_block_info.fields # Preserve any existing overrides
            if 'children' in new_block_info.defaults:
                del new_block_info.defaults['children'] # Will be set later
            new_block_info.edit_info = existing_block_info.edit_info
            new_block_info.edit_info.previous_version = new_block_info.edit_info.update_version
            new_block_info.edit_info.update_version = dest_structure['_id']
            # Note we do not set 'source_version' - it's only used for copying identical blocks
            # from draft to published as part of publishing workflow.
            # Setting it to the source_block_info structure version here breaks split_draft's has_changes() method.
            new_block_info.edit_info.edited_by = user_id
            new_block_info.edit_info.edited_on = datetime.datetime.now(UTC)
            new_block_info.edit_info.original_usage = unicode(usage_key.replace(branch=None, version_guid=None))
            new_block_info.edit_info.original_usage_version = source_block_info.edit_info.update_version
            dest_structure['blocks'][new_block_key] = new_block_info
            children = source_block_info.fields.get('children')
            if children:
                # recurse: the source block's children become children of the cloned block
                children = [src_course_key.make_usage_key(child.type, child.id) for child in children]
                new_blocks |= self._copy_from_template(
                    source_structures, children, dest_structure, new_block_key, user_id, head_validation
                )
            new_blocks.add(new_block_key)
            # And add new_block_key to the list of new_parent_block_key's new children:
            new_children.append(new_block_key)
        # Update the children of new_parent_block_key
        dest_structure['blocks'][new_parent_block_key].fields['children'] = new_children
        return new_blocks
    def delete_item(self, usage_locator, user_id, force=False):
        """
        Delete the block or tree rooted at block (if delete_children) and any references w/in the course to the block
        from a new version of the course structure.

        returns CourseLocator for new version

        raises ItemNotFoundError if the location does not exist.
        raises ValueError if usage_locator points to the structure root

        Creates a new course version. If the descriptor's location has a org, a course, and a run, it moves the course head
        pointer. If the version_guid of the descriptor points to a non-head version and there's been an intervening
        change to this item, it raises a VersionConflictError unless force is True. In the force case, it forks
        the course but leaves the head pointer where it is (this change will not be in the course head).
        """
        if not isinstance(usage_locator, BlockUsageLocator) or usage_locator.deprecated:
            # The supplied UsageKey is of the wrong type, so it can't possibly be stored in this modulestore.
            raise ItemNotFoundError(usage_locator)
        with self.bulk_operations(usage_locator.course_key):
            original_structure = self._lookup_course(usage_locator.course_key).structure
            block_key = BlockKey.from_usage_key(usage_locator)
            if original_structure['root'] == block_key:
                raise ValueError("Cannot delete the root of a course")
            if block_key not in original_structure['blocks']:
                raise ValueError("Cannot delete a block that does not exist")
            index_entry = self._get_index_if_valid(usage_locator.course_key, force)
            # Copy-on-write: the deletion is recorded in a brand-new structure version.
            new_structure = self.version_structure(usage_locator.course_key, original_structure, user_id)
            new_blocks = new_structure['blocks']
            new_id = new_structure['_id']
            # Detach the block from every parent and stamp each parent's edit_info
            # so the change is attributed to this user/version.
            parent_block_keys = self._get_parents_from_structure(block_key, original_structure)
            for parent_block_key in parent_block_keys:
                parent_block = new_blocks[parent_block_key]
                parent_block.fields['children'].remove(block_key)
                parent_block.edit_info.edited_on = datetime.datetime.now(UTC)
                parent_block.edit_info.edited_by = user_id
                parent_block.edit_info.previous_version = parent_block.edit_info.update_version
                parent_block.edit_info.update_version = new_id
                self.decache_block(usage_locator.course_key, new_id, parent_block_key)
            # Remove the block and all of its descendants from the new structure.
            self._remove_subtree(BlockKey.from_usage_key(usage_locator), new_blocks)
            # update index if appropriate and structures
            self.update_structure(usage_locator.course_key, new_structure)
            if index_entry is not None:
                # update the index entry if appropriate
                self._update_head(usage_locator.course_key, index_entry, usage_locator.branch, new_id)
                result = usage_locator.course_key.for_version(new_id)
            else:
                # No index entry (version-only course key): return a version-only locator.
                result = CourseLocator(version_guid=new_id)
            if isinstance(usage_locator.course_key, LibraryLocator):
                self._flag_library_updated_event(usage_locator.course_key)
            return result
@contract(block_key=BlockKey, blocks='dict(BlockKey: BlockData)')
def _remove_subtree(self, block_key, blocks):
"""
Remove the subtree rooted at block_key
"""
for child in blocks[block_key].fields.get('children', []):
self._remove_subtree(BlockKey(*child), blocks)
del blocks[block_key]
    def delete_course(self, course_key, user_id):
        """
        Remove the given course from the course index.

        Only removes the course from the index. The data remains. You can use create_course
        with a versions hash to restore the course; however, the edited_on and
        edited_by won't reflect the originals, of course.
        """
        # this is the only real delete in the system. should it do something else?
        log.info(u"deleting course from split-mongo: %s", course_key)
        # NOTE: structure and definition documents are left in place; only the
        # index entry pointing at them goes away.
        self.delete_course_index(course_key)
        # We do NOT call the super class here since we need to keep the assets
        # in case the course is later restored.
        # super(SplitMongoModuleStore, self).delete_course(course_key, user_id)
    @contract(block_map="dict(BlockKey: dict)", block_key=BlockKey)
    def inherit_settings(
        self, block_map, block_key, inherited_settings_map, inheriting_settings=None, inherited_from=None
    ):
        """
        Updates block_data with any inheritable setting set by an ancestor and recurses to children.

        :param inheriting_settings: the inheritable values accumulated from ancestors so far
        :param inherited_from: chain of child ids already visited on this path (cycle guard)
        """
        if block_key not in block_map:
            return
        block_data = block_map[block_key]
        if inheriting_settings is None:
            inheriting_settings = {}
        if inherited_from is None:
            inherited_from = []
        # the currently passed down values take precedence over any previously cached ones
        # NOTE: this should show the values which all fields would have if inherited: i.e.,
        # not set to the locally defined value but to value set by nearest ancestor who sets it
        inherited_settings_map.setdefault(block_key, {}).update(inheriting_settings)
        # update the inheriting w/ what should pass to children
        inheriting_settings = inherited_settings_map[block_key].copy()
        block_fields = block_data.fields
        # Any inheritable field this block sets locally overrides what it passes down.
        for field_name in inheritance.InheritanceMixin.fields:
            if field_name in block_fields:
                inheriting_settings[field_name] = block_fields[field_name]
        for child in block_fields.get('children', []):
            try:
                if child in inherited_from:
                    raise Exception(u'Infinite loop detected when inheriting to {}, having already inherited from {}'.format(child, inherited_from))
                self.inherit_settings(
                    block_map,
                    BlockKey(*child),
                    inherited_settings_map,
                    inheriting_settings,
                    inherited_from + [child]
                )
            except KeyError:
                # here's where we need logic for looking up in other structures when we allow cross pointers
                # but it's also getting this during course creation if creating top down w/ children set or
                # migration where the old mongo published had pointers to privates
                pass
def descendants(self, block_map, block_id, depth, descendent_map):
"""
adds block and its descendants out to depth to descendent_map
Depth specifies the number of levels of descendants to return
(0 => this usage only, 1 => this usage and its children, etc...)
A depth of None returns all descendants
"""
if block_id not in block_map:
return descendent_map
if block_id not in descendent_map:
descendent_map[block_id] = block_map[block_id]
if depth is None or depth > 0:
depth = depth - 1 if depth is not None else None
for child in descendent_map[block_id].fields.get('children', []):
descendent_map = self.descendants(block_map, child, depth, descendent_map)
return descendent_map
    def get_modulestore_type(self, course_key=None):
        """
        Returns an enumeration-like type reflecting the type of this modulestore, per ModuleStoreEnum.Type.

        Args:
            course_key: just for signature compatibility
        """
        # Constant for this class: split is the only backend this store implements.
        return ModuleStoreEnum.Type.split
def _find_course_assets(self, course_key):
"""
Split specific lookup
"""
try:
course_assets = self._lookup_course(course_key).structure.get('assets', {})
except (InsufficientSpecificationError, VersionConflictError) as err:
log.warning(u'Error finding assets for org "%s" course "%s" on asset '
u'request. Either version of course_key is None or invalid.',
course_key.org, course_key.course)
return {}
return course_assets
    def _update_course_assets(self, user_id, asset_key, update_function):
        """
        A wrapper for functions wanting to manipulate assets. Gets and versions the structure,
        passes the mutable array for either 'assets' or 'thumbnails' as well as the idx to the function for it to
        update, then persists the changed data back into the course.

        The update function can raise an exception if it doesn't want to actually do the commit. The
        surrounding method probably should catch that exception.
        """
        with self.bulk_operations(asset_key.course_key):
            original_structure = self._lookup_course(asset_key.course_key).structure
            index_entry = self._get_index_if_valid(asset_key.course_key)
            # Copy-on-write: mutate a fresh version of the structure, not the original.
            new_structure = self.version_structure(asset_key.course_key, original_structure, user_id)
            course_assets = new_structure.setdefault('assets', {})
            asset_type = asset_key.asset_type
            all_assets = SortedAssetList(iterable=[])
            # Assets should be pre-sorted, so add them efficiently without sorting.
            # extend() will raise a ValueError if the passed-in list is not sorted.
            all_assets.extend(course_assets.setdefault(asset_type, []))
            asset_idx = all_assets.find(asset_key)
            # Callback receives the sorted list plus the index of the target asset
            # (None if absent) and returns the updated list; raising aborts the commit.
            all_assets_updated = update_function(all_assets, asset_idx)
            new_structure['assets'][asset_type] = all_assets_updated.as_list()
            # update index if appropriate and structures
            self.update_structure(asset_key.course_key, new_structure)
            if index_entry is not None:
                # update the index entry if appropriate
                self._update_head(asset_key.course_key, index_entry, asset_key.branch, new_structure['_id'])
    def save_asset_metadata_list(self, asset_metadata_list, user_id, import_only=False):
        """
        Saves a list of AssetMetadata to the modulestore. The list can be composed of multiple
        asset types. This method is optimized for multiple inserts at once - it only re-saves the structure
        at the end of all saves/updates.
        """
        # Determine course key to use in bulk operation. Use the first asset assuming that
        # all assets will be for the same course.
        asset_key = asset_metadata_list[0].asset_id
        course_key = asset_key.course_key
        with self.bulk_operations(course_key):
            original_structure = self._lookup_course(course_key).structure
            index_entry = self._get_index_if_valid(course_key)
            # Copy-on-write: all assets in the list land in one new structure version.
            new_structure = self.version_structure(course_key, original_structure, user_id)
            course_assets = new_structure.setdefault('assets', {})
            assets_by_type = self._save_assets_by_type(
                course_key, asset_metadata_list, course_assets, user_id, import_only
            )
            for asset_type, assets in assets_by_type.iteritems():
                new_structure['assets'][asset_type] = assets.as_list()
            # update index if appropriate and structures
            self.update_structure(course_key, new_structure)
            if index_entry is not None:
                # update the index entry if appropriate
                self._update_head(course_key, index_entry, asset_key.branch, new_structure['_id'])
def save_asset_metadata(self, asset_metadata, user_id, import_only=False):
"""
Saves or updates a single asset. Simply makes it a list and calls the list save above.
"""
return self.save_asset_metadata_list([asset_metadata, ], user_id, import_only)
    @contract(asset_key='AssetKey', attr_dict=dict)
    def set_asset_metadata_attrs(self, asset_key, attr_dict, user_id):
        """
        Add/set the given dict of attrs on the asset at the given location. Value can be any type which pymongo accepts.

        Arguments:
            asset_key (AssetKey): asset identifier
            attr_dict (dict): attribute: value pairs to set

        Raises:
            ItemNotFoundError if no such item exists
            AttributeError if attr is one of the built in attrs.
        """
        def _internal_method(all_assets, asset_idx):
            """
            Update the found item
            """
            if asset_idx is None:
                raise ItemNotFoundError(asset_key)
            # Form an AssetMetadata.
            mdata = AssetMetadata(asset_key, asset_key.path)
            mdata.from_storable(all_assets[asset_idx])
            mdata.update(attr_dict)
            # Generate a Mongo doc from the metadata and update the course asset info.
            all_assets[asset_idx] = mdata.to_storable()
            return all_assets
        # The versioning/persistence boilerplate lives in the shared helper.
        self._update_course_assets(user_id, asset_key, _internal_method)
@contract(asset_key='AssetKey')
def delete_asset_metadata(self, asset_key, user_id):
"""
Internal; deletes a single asset's metadata.
Arguments:
asset_key (AssetKey): key containing original asset filename
Returns:
Number of asset metadata entries deleted (0 or 1)
"""
def _internal_method(all_asset_info, asset_idx):
"""
Remove the item if it was found
"""
if asset_idx is None:
raise ItemNotFoundError(asset_key)
all_asset_info.pop(asset_idx)
return all_asset_info
try:
self._update_course_assets(user_id, asset_key, _internal_method)
return 1
except ItemNotFoundError:
return 0
    @contract(source_course_key='CourseKey', dest_course_key='CourseKey')
    def copy_all_asset_metadata(self, source_course_key, dest_course_key, user_id):
        """
        Copy all the course assets from source_course_key to dest_course_key.

        Arguments:
            source_course_key (CourseKey): identifier of course to copy from
            dest_course_key (CourseKey): identifier of course to copy to
        """
        source_structure = self._lookup_course(source_course_key).structure
        with self.bulk_operations(dest_course_key):
            original_structure = self._lookup_course(dest_course_key).structure
            index_entry = self._get_index_if_valid(dest_course_key)
            new_structure = self.version_structure(dest_course_key, original_structure, user_id)
            # Wholesale replacement: the destination's prior assets/thumbnails are overwritten.
            new_structure['assets'] = source_structure.get('assets', {})
            new_structure['thumbnails'] = source_structure.get('thumbnails', [])
            # update index if appropriate and structures
            self.update_structure(dest_course_key, new_structure)
            if index_entry is not None:
                # update the index entry if appropriate
                self._update_head(dest_course_key, index_entry, dest_course_key.branch, new_structure['_id'])
    def fix_not_found(self, course_locator, user_id):
        """
        Only intended for rather low level methods to use. Goes through the children attrs of
        each block removing any whose block_id is not a member of the course.

        :param course_locator: the course to clean
        """
        original_structure = self._lookup_course(course_locator).structure
        index_entry = self._get_index_if_valid(course_locator)
        new_structure = self.version_structure(course_locator, original_structure, user_id)
        # Keep only children that actually exist in this structure's block table.
        for block in new_structure['blocks'].itervalues():
            if 'children' in block.fields:
                block.fields['children'] = [
                    block_id for block_id in block.fields['children']
                    if block_id in new_structure['blocks']
                ]
        self.update_structure(course_locator, new_structure)
        if index_entry is not None:
            # update the index entry if appropriate
            self._update_head(course_locator, index_entry, course_locator.branch, new_structure['_id'])
    def convert_references_to_keys(self, course_key, xblock_class, jsonfields, blocks):
        """
        Convert the given serialized fields to the deserialized values by finding all references
        and converting them.

        :param jsonfields: the serialized copy of the xblock's fields
        """
        @contract(block_key="BlockUsageLocator | seq[2]")
        def robust_usage_key(block_key):
            """
            create a course_key relative usage key for the block_key. If the block_key is in blocks,
            use its correct category; otherwise, use 'unknown'.

            The purpose for this is that some operations add pointers as they build up the
            structure without worrying about order of creation. Because the category of the
            usage_key is for the most part inert, it's better to hack a value than to work
            out a dependency graph algorithm for those functions which may prereference blocks.
            """
            # if this was taken from cache, then its fields are already converted
            if isinstance(block_key, BlockUsageLocator):
                return block_key.map_into_course(course_key)
            elif not isinstance(block_key, BlockKey):
                block_key = BlockKey(*block_key)
            try:
                return course_key.make_usage_key(
                    block_key.type, block_key.id
                )
            except KeyError:
                return course_key.make_usage_key('unknown', block_key.id)
        xblock_class = self.mixologist.mix(xblock_class)
        # Make a shallow copy, so that we aren't manipulating a cached field dictionary
        output_fields = dict(jsonfields)
        for field_name, value in output_fields.iteritems():
            if value:
                field = xblock_class.fields.get(field_name)
                if field is None:
                    continue
                elif isinstance(field, Reference):
                    output_fields[field_name] = robust_usage_key(value)
                elif isinstance(field, ReferenceList):
                    output_fields[field_name] = [robust_usage_key(ele) for ele in value]
                elif isinstance(field, ReferenceValueDict):
                    # NOTE: the inner dict is converted in place (it was shallow-copied
                    # above, so the same dict object is shared with jsonfields).
                    for key, subvalue in value.iteritems():
                        value[key] = robust_usage_key(subvalue)
        return output_fields
def _get_index_if_valid(self, course_key, force=False):
"""
If the course_key identifies a course and points to its draft (or plausibly its draft),
then return the index entry.
raises VersionConflictError if not the right version
:param course_key: a CourseLocator
:param force: if false, raises VersionConflictError if the current head of the course != the one identified
by course_key
"""
if course_key.org is None or course_key.course is None or course_key.run is None or course_key.branch is None:
return None
else:
index_entry = self.get_course_index(course_key)
is_head = (
course_key.version_guid is None or
index_entry['versions'][course_key.branch] == course_key.version_guid
)
if (is_head or force):
return index_entry
else:
raise VersionConflictError(
course_key,
index_entry['versions'][course_key.branch]
)
def _find_local_root(self, element_to_find, possibility, tree):
if possibility not in tree:
return False
if element_to_find in tree[possibility]:
return True
for subtree in tree[possibility]:
if self._find_local_root(element_to_find, subtree, tree):
return True
return False
def _update_search_targets(self, index_entry, fields):
"""
Update the index entry if any of the given fields are in SEARCH_TARGET_DICT. (doesn't save
the changes, just changes them in the entry dict)
:param index_entry:
:param fields: a dictionary of fields and values usually only those explicitly set and already
ready for persisting (e.g., references converted to block_ids)
"""
for field_name, field_value in fields.iteritems():
if field_name in self.SEARCH_TARGET_DICT:
index_entry.setdefault('search_targets', {})[field_name] = field_value
def _update_head(self, course_key, index_entry, branch, new_id):
"""
Update the active index for the given course's branch to point to new_id
:param index_entry:
:param course_locator:
:param new_id:
"""
if not isinstance(new_id, ObjectId):
raise TypeError('new_id must be an ObjectId, but is {!r}'.format(new_id))
index_entry['versions'][branch] = new_id
self.update_course_index(course_key, index_entry)
def partition_xblock_fields_by_scope(self, xblock):
"""
Return a dictionary of scopes mapped to this xblock's explicitly set fields w/o any conversions
"""
# explicitly_set_fields_by_scope converts to json; so, avoiding it
# the existing partition_fields_by_scope works on a dict not an xblock
result = defaultdict(dict)
for field in xblock.fields.itervalues():
if field.is_set_on(xblock):
result[field.scope][field.name] = field.read_from(xblock)
return result
    def _serialize_fields(self, category, fields):
        """
        Convert any references to their serialized form. Handle some references already being unicoded
        because the client passed them that way and nothing above this layer did the necessary deserialization.
        Remove any fields which split or its kvs computes or adds but does not want persisted.

        :param fields: a dict of fields (NOTE: mutated in place and returned)
        """
        assert isinstance(fields, dict)
        xblock_class = XBlock.load_class(category, self.default_class)
        xblock_class = self.mixologist.mix(xblock_class)
        def reference_block_id(reference):
            """
            Handle client possibly setting field to strings rather than keys to get the block_id
            """
            # perhaps replace by fixing the views or Field Reference*.from_json to return a Key
            if isinstance(reference, basestring):
                reference = BlockUsageLocator.from_string(reference)
            elif isinstance(reference, BlockKey):
                return reference
            return BlockKey.from_usage_key(reference)
        for field_name, value in fields.iteritems():
            if value is not None:
                if isinstance(xblock_class.fields[field_name], Reference):
                    fields[field_name] = reference_block_id(value)
                elif isinstance(xblock_class.fields[field_name], ReferenceList):
                    fields[field_name] = [
                        reference_block_id(ele) for ele in value
                    ]
                elif isinstance(xblock_class.fields[field_name], ReferenceValueDict):
                    # Convert the dict's values in place.
                    for key, subvalue in value.iteritems():
                        value[key] = reference_block_id(subvalue)
                # should this recurse down dicts and lists just in case they contain datetime?
                elif not isinstance(value, datetime.datetime):  # don't convert datetimes!
                    fields[field_name] = xblock_class.fields[field_name].to_json(value)
        return fields
    def _new_structure(self, user_id, root_block_key, block_fields=None, definition_id=None):
        """
        Internal function: create a structure element with no previous version. Must provide the root id
        but not necessarily the info needed to create it (for the use case of publishing). If providing
        root_category, must also provide block_fields and definition_id
        """
        new_id = ObjectId()
        if root_block_key is not None:
            if block_fields is None:
                block_fields = {}
            # Seed the structure with its root block.
            blocks = {
                root_block_key: self._new_block(
                    user_id, root_block_key.type, block_fields, definition_id, new_id
                )
            }
        else:
            # Rootless shell (the publishing use case); blocks get filled in later.
            blocks = {}
        return {
            '_id': new_id,
            'root': root_block_key,
            'previous_version': None,
            'original_version': new_id,  # a brand-new structure is its own origin
            'edited_by': user_id,
            'edited_on': datetime.datetime.now(UTC),
            'blocks': blocks,
            'schema_version': self.SCHEMA_VERSION,
        }
@contract(block_key=BlockKey)
def _get_parents_from_structure(self, block_key, structure):
"""
Given a structure, find block_key's parent in that structure. Note returns
the encoded format for parent
"""
return [
parent_block_key
for parent_block_key, value in structure['blocks'].iteritems()
if block_key in value.fields.get('children', [])
]
def _sync_children(self, source_parent, destination_parent, new_child):
"""
Reorder destination's children to the same as source's and remove any no longer in source.
Return the removed ones as orphans (a set).
"""
destination_reordered = []
destination_children = set(destination_parent.fields['children'])
source_children = source_parent.fields['children']
orphans = destination_children - set(source_children)
for child in source_children:
if child == new_child or child in destination_children:
destination_reordered.append(child)
destination_parent.fields['children'] = destination_reordered
return orphans
    @contract(
        block_key=BlockKey,
        source_blocks="dict(BlockKey: *)",
        destination_blocks="dict(BlockKey: *)",
        blacklist="list(BlockKey) | str",
    )
    def _copy_subdag(self, user_id, destination_version, block_key, source_blocks, destination_blocks, blacklist):
        """
        Update destination_blocks for the sub-dag rooted at block_key to be like the one in
        source_blocks excluding blacklist.

        Return any newly discovered orphans (as a set)
        """
        orphans = set()
        destination_block = destination_blocks.get(block_key)
        new_block = source_blocks[block_key]
        if destination_block:
            # reorder children to correspond to whatever order holds for source.
            # remove any which source no longer claims (put into orphans)
            # add any which are being copied
            source_children = new_block.fields.get('children', [])
            existing_children = destination_block.fields.get('children', [])
            # SparseList lets us place children by their source index, then compact.
            destination_reordered = SparseList()
            for child in existing_children:
                try:
                    index = source_children.index(child)
                    destination_reordered[index] = child
                except ValueError:
                    # Child no longer exists in source: candidate orphan.
                    orphans.add(BlockKey(*child))
            if blacklist != EXCLUDE_ALL:
                for index, child in enumerate(source_children):
                    if child not in blacklist:
                        destination_reordered[index] = child
            # the history of the published leaps between publications and only points to
            # previously published versions.
            previous_version = destination_block.edit_info.update_version
            destination_block = copy.deepcopy(new_block)
            destination_block.fields['children'] = destination_reordered.compact_list()
            destination_block.edit_info.previous_version = previous_version
            destination_block.edit_info.update_version = destination_version
            destination_block.edit_info.edited_by = user_id
            destination_block.edit_info.edited_on = datetime.datetime.now(UTC)
        else:
            # Block is new to the destination: create it from the source block.
            destination_block = self._new_block(
                user_id, new_block.block_type,
                self._filter_blacklist(copy.copy(new_block.fields), blacklist),
                new_block.definition,
                destination_version,
                raw=True,
                block_defaults=new_block.defaults
            )
            # Extend the block's new edit_info with any extra edit_info fields from the source (e.g. original_usage):
            for key, val in new_block.edit_info.to_storable().iteritems():
                if getattr(destination_block.edit_info, key) is None:
                    setattr(destination_block.edit_info, key, val)
        # introduce new edit info field for tracing where copied/published blocks came
        destination_block.edit_info.source_version = new_block.edit_info.update_version
        if blacklist != EXCLUDE_ALL:
            # Recurse into children that aren't blacklisted, accumulating their orphans.
            for child in destination_block.fields.get('children', []):
                if child not in blacklist:
                    orphans.update(
                        self._copy_subdag(
                            user_id, destination_version, BlockKey(*child), source_blocks, destination_blocks, blacklist
                        )
                    )
        destination_blocks[block_key] = destination_block
        return orphans
@contract(blacklist='list(BlockKey) | str')
def _filter_blacklist(self, fields, blacklist):
"""
Filter out blacklist from the children field in fields. Will construct a new list for children;
so, no need to worry about copying the children field, but it will modify fiels.
"""
if blacklist == EXCLUDE_ALL:
fields['children'] = []
else:
fields['children'] = [child for child in fields.get('children', []) if BlockKey(*child) not in blacklist]
return fields
@contract(orphan=BlockKey)
def _delete_if_true_orphan(self, orphan, structure):
"""
Delete the orphan and any of its descendants which no longer have parents.
"""
if len(self._get_parents_from_structure(orphan, structure)) == 0:
for child in structure['blocks'][orphan].fields.get('children', []):
self._delete_if_true_orphan(BlockKey(*child), structure)
del structure['blocks'][orphan]
@contract(returns=BlockData)
def _new_block(self, user_id, category, block_fields, definition_id, new_id, raw=False, block_defaults=None):
"""
Create the core document structure for a block.
:param block_fields: the settings and children scoped fields as a dict or son
:param definition_id: the pointer to the content scoped fields
:param new_id: the structure's version id
:param raw: true if this block already has all references serialized
"""
if not raw:
block_fields = self._serialize_fields(category, block_fields)
document = {
'block_type': category,
'definition': definition_id,
'fields': block_fields,
'edit_info': {
'edited_on': datetime.datetime.now(UTC),
'edited_by': user_id,
'previous_version': None,
'update_version': new_id
}
}
if block_defaults:
document['defaults'] = block_defaults
return BlockData(**document)
@contract(block_key=BlockKey, returns='BlockData | None')
def _get_block_from_structure(self, structure, block_key):
"""
Encodes the block key before retrieving it from the structure to ensure it can
be a json dict key.
"""
return structure['blocks'].get(block_key)
@contract(block_key=BlockKey, content=BlockData)
def _update_block_in_structure(self, structure, block_key, content):
"""
Encodes the block key before accessing it in the structure to ensure it can
be a json dict key.
"""
structure['blocks'][block_key] = content
@autoretry_read()
def find_courses_by_search_target(self, field_name, field_value):
"""
Find all the courses which cached that they have the given field with the given value.
Returns: list of branch-agnostic course_keys
"""
entries = self.find_matching_course_indexes(
search_targets={field_name: field_value}
)
return [
CourseLocator(entry['org'], entry['course'], entry['run']) # Branch agnostic
for entry in entries
]
    def get_courses_for_wiki(self, wiki_slug, **kwargs):
        """
        Return the list of courses which use this wiki_slug
        :param wiki_slug: the course wiki root slug
        :return: list of course keys
        """
        # 'wiki_slug' is a cached search target, so this is an index-only query.
        return self.find_courses_by_search_target('wiki_slug', wiki_slug)
    def heartbeat(self):
        """
        Check that the db is reachable.

        Returns a dict mapping this store's type to the connection's heartbeat result.
        """
        return {ModuleStoreEnum.Type.split: self.db_connection.heartbeat()}
def create_runtime(self, course_entry, lazy):
"""
Create the proper runtime for this course
"""
return CachingDescriptorSystem(
modulestore=self,
course_entry=course_entry,
module_data={},
lazy=lazy,
default_class=self.default_class,
error_tracker=self.error_tracker,
render_template=self.render_template,
mixins=self.xblock_mixins,
select=self.xblock_select,
services=self.services,
)
    def ensure_indexes(self):
        """
        Ensure that all appropriate indexes are created that are needed by this modulestore, or raise
        an exception if unable to.

        This method is intended for use by tests and administrative commands, and not
        to be run during server startup.
        """
        # Index creation is delegated wholesale to the persistence layer.
        self.db_connection.ensure_indexes()
class SparseList(list):
    """
    A list that grows on demand: assigning past the end pads with None, and
    compact_list() returns the stored values with the padding stripped.
    """
    # taken from http://stackoverflow.com/questions/1857780/sparse-assignment-list-in-python
    def __setitem__(self, index, value):
        """
        Assign ``value`` at ``index``, first padding the list with None if it is too short.
        """
        shortfall = index + 1 - len(self)
        if shortfall > 0:
            self.extend(None for _ in range(shortfall))
        super(SparseList, self).__setitem__(index, value)

    def compact_list(self):
        """
        Return a plain list of the non-None elements, in order.
        """
        result = []
        for ele in self:
            if ele is not None:
                result.append(ele)
        return result
| agpl-3.0 |
jlspyaozhongkai/Uter | third_party_backup/Python-2.7.9/Lib/distutils/util.py | 64 | 18037 | """distutils.util
Miscellaneous utility functions -- anything that doesn't fit into
one of the other *util.py modules.
"""
__revision__ = "$Id$"
import sys, os, string, re
from distutils.errors import DistutilsPlatformError
from distutils.dep_util import newer
from distutils.spawn import spawn
from distutils import log
from distutils.errors import DistutilsByteCompileError
def get_platform ():
    """Return a string that identifies the current platform. This is used
    mainly to distinguish platform-specific build directories and
    platform-specific built distributions. Typically includes the OS name
    and version and the architecture (as supplied by 'os.uname()'),
    although the exact information included depends on the OS; eg. for IRIX
    the architecture isn't particularly important (IRIX only runs on SGI
    hardware), but for Linux the kernel version isn't particularly
    important.

    Examples of returned values:
       linux-i586
       linux-alpha (?)
       solaris-2.6-sun4u
       irix-5.3
       irix64-6.2

    Windows will return one of:
       win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)
       win-ia64 (64bit Windows on Itanium)
       win32 (all others - specifically, sys.platform is returned)

    For other non-POSIX platforms, currently just returns 'sys.platform'.
    """
    if os.name == 'nt':
        # sniff sys.version for architecture.
        prefix = " bit ("
        i = string.find(sys.version, prefix)
        if i == -1:
            return sys.platform
        j = string.find(sys.version, ")", i)
        look = sys.version[i+len(prefix):j].lower()
        if look=='amd64':
            return 'win-amd64'
        if look=='itanium':
            return 'win-ia64'
        return sys.platform

    # Set for cross builds explicitly
    if "_PYTHON_HOST_PLATFORM" in os.environ:
        return os.environ["_PYTHON_HOST_PLATFORM"]

    if os.name != "posix" or not hasattr(os, 'uname'):
        # XXX what about the architecture? NT is Intel or Alpha,
        # Mac OS is M68k or PPC, etc.
        return sys.platform

    # Try to distinguish various flavours of Unix
    (osname, host, release, version, machine) = os.uname()

    # Convert the OS name to lowercase, remove '/' characters
    # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
    osname = string.lower(osname)
    osname = string.replace(osname, '/', '')
    machine = string.replace(machine, ' ', '_')
    machine = string.replace(machine, '/', '-')

    if osname[:5] == "linux":
        # At least on Linux/Intel, 'machine' is the processor --
        # i386, etc.
        # XXX what about Alpha, SPARC, etc?
        return "%s-%s" % (osname, machine)
    elif osname[:5] == "sunos":
        if release[0] >= "5":  # SunOS 5 == Solaris 2
            osname = "solaris"
            release = "%d.%s" % (int(release[0]) - 3, release[2:])
            # We can't use "platform.architecture()[0]" because a
            # bootstrap problem. We use a dict to get an error
            # if some suspicious happens.
            bitness = {2147483647:"32bit", 9223372036854775807:"64bit"}
            machine += ".%s" % bitness[sys.maxint]
        # fall through to standard osname-release-machine representation
    elif osname[:4] == "irix":  # could be "irix64"!
        return "%s-%s" % (osname, release)
    elif osname[:3] == "aix":
        return "%s-%s.%s" % (osname, version, release)
    elif osname[:6] == "cygwin":
        osname = "cygwin"
        rel_re = re.compile (r'[\d.]+')
        m = rel_re.match(release)
        if m:
            release = m.group()
    elif osname[:6] == "darwin":
        # Mac framework builds get their platform triple from _osx_support.
        import _osx_support, distutils.sysconfig
        osname, release, machine = _osx_support.get_platform_osx(
                                       distutils.sysconfig.get_config_vars(),
                                       osname, release, machine)

    return "%s-%s-%s" % (osname, release, machine)

# get_platform ()
def convert_path(pathname):
    """Return 'pathname' as a name that will work on the native filesystem,
    i.e. split it on '/' and put it back together again using the current
    directory separator. Needed because filenames in the setup script are
    always supplied in Unix style, and have to be converted to the local
    convention before we can actually use them in the filesystem. Raises
    ValueError on non-Unix-ish systems if 'pathname' either starts or
    ends with a slash.
    """
    if os.sep == '/' or not pathname:
        # Native separator is already '/', or the path is empty: nothing to do.
        return pathname
    if pathname.startswith('/'):
        raise ValueError("path '%s' cannot be absolute" % pathname)
    if pathname.endswith('/'):
        raise ValueError("path '%s' cannot end with '/'" % pathname)
    # Drop no-op '.' components, then rebuild with the native separator.
    components = [comp for comp in pathname.split('/') if comp != '.']
    if not components:
        return os.curdir
    return os.path.join(*components)
def change_root (new_root, pathname):
    """Return 'pathname' with 'new_root' prepended.  If 'pathname' is
    relative, this is equivalent to "os.path.join(new_root,pathname)".
    Otherwise, it requires making 'pathname' relative and then joining the
    two, which is tricky on DOS/Windows and Mac OS.

    Raises DistutilsPlatformError on platforms this function does not
    know how to handle.
    """
    if os.name == 'posix':
        if not os.path.isabs(pathname):
            return os.path.join(new_root, pathname)
        else:
            # Strip the leading '/' so the join does not discard new_root.
            return os.path.join(new_root, pathname[1:])

    elif os.name == 'nt':
        # Drop the drive letter, then any leading backslash.
        (drive, path) = os.path.splitdrive(pathname)
        if path[0] == '\\':
            path = path[1:]
        return os.path.join(new_root, path)

    elif os.name == 'os2':
        (drive, path) = os.path.splitdrive(pathname)
        if path[0] == os.sep:
            path = path[1:]
        return os.path.join(new_root, path)

    else:
        # Parenthesized raise form is valid on both Python 2 and 3
        # (the old comma form is Python-2-only).
        raise DistutilsPlatformError(
            "nothing known about platform '%s'" % os.name)
# Guard so the (potentially slow) environment fix-up runs only once.
_environ_checked = 0

def check_environ ():
    """Ensure that 'os.environ' has all the environment variables we
    guarantee that users can use in config files, command-line options,
    etc.  Currently this includes:
      HOME - user's home directory (Unix only)
      PLAT - description of the current platform, including hardware
             and OS (see 'get_platform()')
    """
    global _environ_checked
    if _environ_checked:
        return

    if os.name == 'posix' and 'HOME' not in os.environ:
        import pwd
        # Field 5 of the passwd entry is the user's home directory.
        os.environ['HOME'] = pwd.getpwuid(os.getuid())[5]

    if 'PLAT' not in os.environ:
        os.environ['PLAT'] = get_platform()

    _environ_checked = 1

def subst_vars (s, local_vars):
    """Perform shell/Perl-style variable substitution on 's'.  Every
    occurrence of '$' followed by a name is considered a variable, and
    variable is substituted by the value found in the 'local_vars'
    dictionary, or in 'os.environ' if it's not in 'local_vars'.
    'os.environ' is first checked/augmented to guarantee that it contains
    certain values: see 'check_environ()'.  Raise ValueError for any
    variables not found in either 'local_vars' or 'os.environ'.
    """
    check_environ()
    def _subst (match, local_vars=local_vars):
        var_name = match.group(1)
        if var_name in local_vars:
            return str(local_vars[var_name])
        else:
            # May raise KeyError; translated to ValueError below.
            return os.environ[var_name]

    try:
        return re.sub(r'\$([a-zA-Z_][a-zA-Z_0-9]*)', _subst, s)
    except KeyError as var:
        # 'except ... as' and the parenthesized raise are valid on
        # Python 2.6+ and 3.x; the old comma forms are Python-2-only.
        raise ValueError("invalid variable '$%s'" % var)

# subst_vars ()
def grok_environment_error (exc, prefix="error: "):
    """Return a one-line error message for *exc*, prepended with *prefix*.

    Kept for backward compatibility: earlier versions tried clever things
    with EnvironmentError attributes, but str(exception) now produces a
    good message on its own.
    """
    message = str(exc)
    return "%s%s" % (prefix, message)
# Needed by 'split_quoted()' -- compiled lazily by _init_regex().
_wordchars_re = _squote_re = _dquote_re = None

def _init_regex():
    """Compile the word/quote regexes used by split_quoted()."""
    global _wordchars_re, _squote_re, _dquote_re
    # A "word" is any run of characters that are not backslash, quote,
    # or whitespace.
    _wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace)
    _squote_re = re.compile(r"'(?:[^'\\]|\\.)*'")
    _dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"')

def split_quoted (s):
    """Split a string up according to Unix shell-like rules for quotes and
    backslashes.  In short: words are delimited by spaces, as long as those
    spaces are not escaped by a backslash, or inside a quoted string.
    Single and double quotes are equivalent, and the quote characters can
    be backslash-escaped.  The backslash is stripped from any two-character
    escape sequence, leaving only the escaped character.  The quote
    characters are stripped from any quoted string.  Returns a list of
    words.
    """
    # This is a nice algorithm for splitting up a single string, since it
    # doesn't require character-by-character examination.  It was a little
    # bit of a brain-bender to get it working right, though...
    if _wordchars_re is None: _init_regex()

    # str methods replace the deprecated string-module function forms;
    # parenthesized raises below work on both Python 2 and 3.
    s = s.strip()
    words = []
    pos = 0

    while s:
        m = _wordchars_re.match(s, pos)
        end = m.end()
        if end == len(s):
            words.append(s[:end])
            break

        if s[end] in string.whitespace: # unescaped, unquoted whitespace: now
            words.append(s[:end])       # we definitely have a word delimiter
            s = s[end:].lstrip()
            pos = 0

        elif s[end] == '\\':            # preserve whatever is being escaped;
                                        # will become part of the current word
            s = s[:end] + s[end+1:]
            pos = end+1

        else:
            if s[end] == "'":           # slurp singly-quoted string
                m = _squote_re.match(s, end)
            elif s[end] == '"':         # slurp doubly-quoted string
                m = _dquote_re.match(s, end)
            else:
                raise RuntimeError("this can't happen (bad char '%c')" % s[end])

            if m is None:
                raise ValueError("bad string (mismatched %s quotes?)" % s[end])

            # Splice out the quote characters, keep the quoted text.
            (beg, end) = m.span()
            s = s[:beg] + s[beg+1:end-1] + s[end:]
            pos = m.end() - 2

        if pos >= len(s):
            words.append(s)
            break

    return words

# split_quoted ()
def execute (func, args, msg=None, verbose=0, dry_run=0):
    """Perform some action that affects the outside world (eg. by
    writing to the filesystem).  Such actions are special because they
    are disabled by the 'dry_run' flag.  This method takes care of all
    that bureaucracy for you; all you have to do is supply the
    function to call and an argument tuple for it (to embody the
    "external action" being performed), and an optional message to
    print.
    """
    if msg is None:
        msg = "%s%r" % (func.__name__, args)
        # repr() of a 1-tuple ends in ",)"; trim it so the log message
        # looks like an ordinary call.
        if msg.endswith(',)'):
            msg = msg[:-2] + ')'

    log.info(msg)
    if dry_run:
        return
    func(*args)
def strtobool (val):
    """Convert a string representation of truth to true (1) or false (0).

    True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
    are 'n', 'no', 'f', 'false', 'off', and '0'.  Raises ValueError if
    'val' is anything else.
    """
    # str method instead of the deprecated string.lower() function;
    # parenthesized raise works on both Python 2 and 3.
    val = val.lower()
    if val in ('y', 'yes', 't', 'true', 'on', '1'):
        return 1
    elif val in ('n', 'no', 'f', 'false', 'off', '0'):
        return 0
    else:
        raise ValueError("invalid truth value %r" % (val,))
def byte_compile (py_files,
                  optimize=0, force=0,
                  prefix=None, base_dir=None,
                  verbose=1, dry_run=0,
                  direct=None):
    """Byte-compile a collection of Python source files to either .pyc
    or .pyo files in the same directory.  'py_files' is a list of files
    to compile; any files that don't end in ".py" are silently skipped.
    'optimize' must be one of the following:
      0 - don't optimize (generate .pyc)
      1 - normal optimization (like "python -O")
      2 - extra optimization (like "python -OO")
    If 'force' is true, all files are recompiled regardless of
    timestamps.
    The source filename encoded in each bytecode file defaults to the
    filenames listed in 'py_files'; you can modify these with 'prefix' and
    'basedir'.  'prefix' is a string that will be stripped off of each
    source filename, and 'base_dir' is a directory name that will be
    prepended (after 'prefix' is stripped).  You can supply either or both
    (or neither) of 'prefix' and 'base_dir', as you wish.
    If 'dry_run' is true, doesn't actually do anything that would
    affect the filesystem.
    Byte-compilation is either done directly in this interpreter process
    with the standard py_compile module, or indirectly by writing a
    temporary script and executing it.  Normally, you should let
    'byte_compile()' figure out to use direct compilation or not (see
    the source for details).  The 'direct' flag is used by the script
    generated in indirect mode; unless you know what you're doing, leave
    it set to None.
    """
    # nothing is done if sys.dont_write_bytecode is True
    if sys.dont_write_bytecode:
        raise DistutilsByteCompileError('byte-compiling is disabled.')
    # First, if the caller didn't force us into direct or indirect mode,
    # figure out which mode we should be in.  We take a conservative
    # approach: choose direct mode *only* if the current interpreter is
    # in debug mode and optimize is 0.  If we're not in debug mode (-O
    # or -OO), we don't know which level of optimization this
    # interpreter is running with, so we can't do direct
    # byte-compilation and be certain that it's the right thing.  Thus,
    # always compile indirectly if the current interpreter is in either
    # optimize mode, or if either optimization level was requested by
    # the caller.
    if direct is None:
        direct = (__debug__ and optimize == 0)
    # "Indirect" byte-compilation: write a temporary script and then
    # run it with the appropriate flags.  The generated script simply
    # re-invokes byte_compile() in *direct* mode inside a fresh
    # interpreter started with the matching -O/-OO flag.
    if not direct:
        try:
            from tempfile import mkstemp
            (script_fd, script_name) = mkstemp(".py")
        except ImportError:
            # Very old Pythons lack mkstemp(); fall back to the less
            # secure mktemp() and open the file by name below.
            from tempfile import mktemp
            (script_fd, script_name) = None, mktemp(".py")
        log.info("writing byte-compilation script '%s'", script_name)
        if not dry_run:
            if script_fd is not None:
                script = os.fdopen(script_fd, "w")
            else:
                script = open(script_name, "w")
            # NOTE: the triple-quoted text below is the *source* of the
            # generated script; its lines are intentionally unindented.
            script.write("""\
from distutils.util import byte_compile
files = [
""")
            # XXX would be nice to write absolute filenames, just for
            # safety's sake (script should be more robust in the face of
            # chdir'ing before running it).  But this requires abspath'ing
            # 'prefix' as well, and that breaks the hack in build_lib's
            # 'byte_compile()' method that carefully tacks on a trailing
            # slash (os.sep really) to make sure the prefix here is "just
            # right".  This whole prefix business is rather delicate -- the
            # problem is that it's really a directory, but I'm treating it
            # as a dumb string, so trailing slashes and so forth matter.
            #py_files = map(os.path.abspath, py_files)
            #if prefix:
            #    prefix = os.path.abspath(prefix)
            script.write(string.join(map(repr, py_files), ",\n") + "]\n")
            script.write("""
byte_compile(files, optimize=%r, force=%r,
             prefix=%r, base_dir=%r,
             verbose=%r, dry_run=0,
             direct=1)
""" % (optimize, force, prefix, base_dir, verbose))
            script.close()
        cmd = [sys.executable, script_name]
        if optimize == 1:
            cmd.insert(1, "-O")
        elif optimize == 2:
            cmd.insert(1, "-OO")
        spawn(cmd, dry_run=dry_run)
        # Clean up the temporary script (honouring dry_run).
        execute(os.remove, (script_name,), "removing %s" % script_name,
                dry_run=dry_run)
    # "Direct" byte-compilation: use the py_compile module to compile
    # right here, right now.  Note that the script generated in indirect
    # mode simply calls 'byte_compile()' in direct mode, a weird sort of
    # cross-process recursion.  Hey, it works!
    else:
        from py_compile import compile
        for file in py_files:
            if file[-3:] != ".py":
                # This lets us be lazy and not filter filenames in
                # the "install_lib" command.
                continue
            # Terminology from the py_compile module:
            #   cfile - byte-compiled file
            #   dfile - purported source filename (same as 'file' by default)
            # __debug__ is false under -O/-OO, hence .pyo instead of .pyc.
            cfile = file + (__debug__ and "c" or "o")
            dfile = file
            if prefix:
                if file[:len(prefix)] != prefix:
                    raise ValueError, \
                          ("invalid prefix: filename %r doesn't start with %r"
                           % (file, prefix))
                dfile = dfile[len(prefix):]
            if base_dir:
                dfile = os.path.join(base_dir, dfile)
            cfile_base = os.path.basename(cfile)
            if direct:
                # Only recompile when forced or when the source is newer
                # than the existing bytecode file.
                if force or newer(file, cfile):
                    log.info("byte-compiling %s to %s", file, cfile_base)
                    if not dry_run:
                        compile(file, cfile, dfile)
                else:
                    log.debug("skipping byte-compilation of %s to %s",
                              file, cfile_base)
# byte_compile ()
def rfc822_escape (header):
    """Return a version of the string escaped for inclusion in an
    RFC-822 header, by ensuring there are 8 spaces after each newline.
    """
    # str.split/str.join replace the deprecated string-module functions
    # (string.split/string.join do not exist on Python 3).
    lines = header.split('\n')
    header = ('\n' + 8 * ' ').join(lines)
    return header
| gpl-3.0 |
ukanga/SickRage | lib/unidecode/x053.py | 252 | 4616 | data = (
'Yun ', # 0x00
'Mwun ', # 0x01
'Nay ', # 0x02
'Gai ', # 0x03
'Gai ', # 0x04
'Bao ', # 0x05
'Cong ', # 0x06
'[?] ', # 0x07
'Xiong ', # 0x08
'Peng ', # 0x09
'Ju ', # 0x0a
'Tao ', # 0x0b
'Ge ', # 0x0c
'Pu ', # 0x0d
'An ', # 0x0e
'Pao ', # 0x0f
'Fu ', # 0x10
'Gong ', # 0x11
'Da ', # 0x12
'Jiu ', # 0x13
'Qiong ', # 0x14
'Bi ', # 0x15
'Hua ', # 0x16
'Bei ', # 0x17
'Nao ', # 0x18
'Chi ', # 0x19
'Fang ', # 0x1a
'Jiu ', # 0x1b
'Yi ', # 0x1c
'Za ', # 0x1d
'Jiang ', # 0x1e
'Kang ', # 0x1f
'Jiang ', # 0x20
'Kuang ', # 0x21
'Hu ', # 0x22
'Xia ', # 0x23
'Qu ', # 0x24
'Bian ', # 0x25
'Gui ', # 0x26
'Qie ', # 0x27
'Zang ', # 0x28
'Kuang ', # 0x29
'Fei ', # 0x2a
'Hu ', # 0x2b
'Tou ', # 0x2c
'Gui ', # 0x2d
'Gui ', # 0x2e
'Hui ', # 0x2f
'Dan ', # 0x30
'Gui ', # 0x31
'Lian ', # 0x32
'Lian ', # 0x33
'Suan ', # 0x34
'Du ', # 0x35
'Jiu ', # 0x36
'Qu ', # 0x37
'Xi ', # 0x38
'Pi ', # 0x39
'Qu ', # 0x3a
'Yi ', # 0x3b
'Qia ', # 0x3c
'Yan ', # 0x3d
'Bian ', # 0x3e
'Ni ', # 0x3f
'Qu ', # 0x40
'Shi ', # 0x41
'Xin ', # 0x42
'Qian ', # 0x43
'Nian ', # 0x44
'Sa ', # 0x45
'Zu ', # 0x46
'Sheng ', # 0x47
'Wu ', # 0x48
'Hui ', # 0x49
'Ban ', # 0x4a
'Shi ', # 0x4b
'Xi ', # 0x4c
'Wan ', # 0x4d
'Hua ', # 0x4e
'Xie ', # 0x4f
'Wan ', # 0x50
'Bei ', # 0x51
'Zu ', # 0x52
'Zhuo ', # 0x53
'Xie ', # 0x54
'Dan ', # 0x55
'Mai ', # 0x56
'Nan ', # 0x57
'Dan ', # 0x58
'Ji ', # 0x59
'Bo ', # 0x5a
'Shuai ', # 0x5b
'Bu ', # 0x5c
'Kuang ', # 0x5d
'Bian ', # 0x5e
'Bu ', # 0x5f
'Zhan ', # 0x60
'Qia ', # 0x61
'Lu ', # 0x62
'You ', # 0x63
'Lu ', # 0x64
'Xi ', # 0x65
'Gua ', # 0x66
'Wo ', # 0x67
'Xie ', # 0x68
'Jie ', # 0x69
'Jie ', # 0x6a
'Wei ', # 0x6b
'Ang ', # 0x6c
'Qiong ', # 0x6d
'Zhi ', # 0x6e
'Mao ', # 0x6f
'Yin ', # 0x70
'Wei ', # 0x71
'Shao ', # 0x72
'Ji ', # 0x73
'Que ', # 0x74
'Luan ', # 0x75
'Shi ', # 0x76
'Juan ', # 0x77
'Xie ', # 0x78
'Xu ', # 0x79
'Jin ', # 0x7a
'Que ', # 0x7b
'Wu ', # 0x7c
'Ji ', # 0x7d
'E ', # 0x7e
'Qing ', # 0x7f
'Xi ', # 0x80
'[?] ', # 0x81
'Han ', # 0x82
'Zhan ', # 0x83
'E ', # 0x84
'Ting ', # 0x85
'Li ', # 0x86
'Zhe ', # 0x87
'Han ', # 0x88
'Li ', # 0x89
'Ya ', # 0x8a
'Ya ', # 0x8b
'Yan ', # 0x8c
'She ', # 0x8d
'Zhi ', # 0x8e
'Zha ', # 0x8f
'Pang ', # 0x90
'[?] ', # 0x91
'He ', # 0x92
'Ya ', # 0x93
'Zhi ', # 0x94
'Ce ', # 0x95
'Pang ', # 0x96
'Ti ', # 0x97
'Li ', # 0x98
'She ', # 0x99
'Hou ', # 0x9a
'Ting ', # 0x9b
'Zui ', # 0x9c
'Cuo ', # 0x9d
'Fei ', # 0x9e
'Yuan ', # 0x9f
'Ce ', # 0xa0
'Yuan ', # 0xa1
'Xiang ', # 0xa2
'Yan ', # 0xa3
'Li ', # 0xa4
'Jue ', # 0xa5
'Sha ', # 0xa6
'Dian ', # 0xa7
'Chu ', # 0xa8
'Jiu ', # 0xa9
'Qin ', # 0xaa
'Ao ', # 0xab
'Gui ', # 0xac
'Yan ', # 0xad
'Si ', # 0xae
'Li ', # 0xaf
'Chang ', # 0xb0
'Lan ', # 0xb1
'Li ', # 0xb2
'Yan ', # 0xb3
'Yan ', # 0xb4
'Yuan ', # 0xb5
'Si ', # 0xb6
'Gong ', # 0xb7
'Lin ', # 0xb8
'Qiu ', # 0xb9
'Qu ', # 0xba
'Qu ', # 0xbb
'Uk ', # 0xbc
'Lei ', # 0xbd
'Du ', # 0xbe
'Xian ', # 0xbf
'Zhuan ', # 0xc0
'San ', # 0xc1
'Can ', # 0xc2
'Can ', # 0xc3
'Can ', # 0xc4
'Can ', # 0xc5
'Ai ', # 0xc6
'Dai ', # 0xc7
'You ', # 0xc8
'Cha ', # 0xc9
'Ji ', # 0xca
'You ', # 0xcb
'Shuang ', # 0xcc
'Fan ', # 0xcd
'Shou ', # 0xce
'Guai ', # 0xcf
'Ba ', # 0xd0
'Fa ', # 0xd1
'Ruo ', # 0xd2
'Shi ', # 0xd3
'Shu ', # 0xd4
'Zhuo ', # 0xd5
'Qu ', # 0xd6
'Shou ', # 0xd7
'Bian ', # 0xd8
'Xu ', # 0xd9
'Jia ', # 0xda
'Pan ', # 0xdb
'Sou ', # 0xdc
'Gao ', # 0xdd
'Wei ', # 0xde
'Sou ', # 0xdf
'Die ', # 0xe0
'Rui ', # 0xe1
'Cong ', # 0xe2
'Kou ', # 0xe3
'Gu ', # 0xe4
'Ju ', # 0xe5
'Ling ', # 0xe6
'Gua ', # 0xe7
'Tao ', # 0xe8
'Kou ', # 0xe9
'Zhi ', # 0xea
'Jiao ', # 0xeb
'Zhao ', # 0xec
'Ba ', # 0xed
'Ding ', # 0xee
'Ke ', # 0xef
'Tai ', # 0xf0
'Chi ', # 0xf1
'Shi ', # 0xf2
'You ', # 0xf3
'Qiu ', # 0xf4
'Po ', # 0xf5
'Xie ', # 0xf6
'Hao ', # 0xf7
'Si ', # 0xf8
'Tan ', # 0xf9
'Chi ', # 0xfa
'Le ', # 0xfb
'Diao ', # 0xfc
'Ji ', # 0xfd
'[?] ', # 0xfe
'Hong ', # 0xff
)
| gpl-3.0 |
harryface/__1255548754516tftffvvgcgvgvg | blog/models.py | 1 | 1433 | from django.db import models
from django.core.urlresolvers import reverse
from django.template.defaultfilters import slugify
# Create your models here.
class Post (models.Model):
    """Blog entry: a title, an optional image, body text and a URL slug.

    The slug is derived from the title on first save and is not editable.
    """
    title = models.CharField(max_length=40)
    post_image = models.ImageField(upload_to='static/blog', blank=True)
    post_body = models.TextField(blank = True)
    slug = models.SlugField(max_length=200, editable = False)

    def get_absolute_url(self):
        """Canonical URL of this post (routed by id and slug)."""
        return reverse('post_detail', args=[self.id, self.slug])

    def save(self, *args, **kwargs):
        """Slugify the title on creation; on update, delete a replaced image file.

        Catching only Post.DoesNotExist (instead of the original bare
        ``except: pass``) means genuine storage/database errors are no
        longer silently swallowed.
        """
        if not self.pk:
            # First save: derive the slug from the title once.
            self.slug = slugify(self.title)
        else:
            try:
                previous = Post.objects.get(pk=self.pk)
                if previous.post_image != self.post_image:
                    # Remove the old file from storage without re-saving
                    # the stale row.
                    previous.post_image.delete(save=False)
            except Post.DoesNotExist:
                # Row not persisted yet; nothing to clean up.
                pass
        return super(Post, self).save(*args, **kwargs)
class Gallery (models.Model):
    """Gallery item: a title, an optional image, a description and a URL slug.

    The slug is derived from the title on first save and is not editable.
    """
    title = models.CharField(max_length=40)
    image = models.ImageField(upload_to='static/gallery', blank=True)
    description = models.TextField(blank = True)
    slug = models.SlugField(max_length=200, editable = False)

    def get_absolute_url(self):
        """Canonical URL of this gallery item (routed by id and slug)."""
        return reverse('gallery_detail', args=[self.id, self.slug])

    def save(self, *args, **kwargs):
        """Slugify the title on creation; on update, delete a replaced image file.

        Catching only Gallery.DoesNotExist (instead of the original bare
        ``except: pass``) means genuine storage/database errors are no
        longer silently swallowed.
        """
        if not self.pk:
            # First save: derive the slug from the title once.
            self.slug = slugify(self.title)
        else:
            try:
                previous = Gallery.objects.get(pk=self.pk)
                if previous.image != self.image:
                    # Remove the old file from storage without re-saving
                    # the stale row.
                    previous.image.delete(save=False)
            except Gallery.DoesNotExist:
                # Row not persisted yet; nothing to clean up.
                pass
        return super(Gallery, self).save(*args, **kwargs)
| gpl-3.0 |
EmreAtes/spack | var/spack/repos/builtin/packages/py-numpydoc/package.py | 5 | 1666 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyNumpydoc(PythonPackage):
    """numpydoc - Numpy's Sphinx extensions"""

    # Upstream project page and source-tarball download location.
    homepage = "https://github.com/numpy/numpydoc"
    url = "https://pypi.io/packages/source/n/numpydoc/numpydoc-0.6.0.tar.gz"

    # Known release; the second argument is the md5 checksum of the tarball.
    version('0.6.0', '5f1763c44e613850d56ba1b1cf1cb146')

    # Supported interpreter versions (2.6-2.8 or 3.3+).
    depends_on('python@2.6:2.8,3.3:')
    # Build-time-only dependencies.
    depends_on('py-setuptools', type='build')
    depends_on('py-sphinx@1.0.1:', type='build')
ovnicraft/openerp-restaurant | account_analytic_analysis/__openerp__.py | 5 | 2322 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP addon manifest: a single dict literal describing the
# account_analytic_analysis ("Contracts Management") module.
{
    'name': 'Contracts Management',
    'version': '1.1',
    'category': 'Sales Management',
    'description': """
This module is for modifying account analytic view to show important data to project manager of services companies.
===================================================================================================================
Adds menu to show relevant information to each manager.You can also view the report of account analytic summary user-wise as well as month-wise.
""",
    'author': 'Camptocamp',
    'website': 'http://www.camptocamp.com/',
    'images': ['images/bill_tasks_works.jpeg','images/overpassed_accounts.jpeg'],
    'depends': ['hr_timesheet_invoice', 'sale'], #although sale is technically not required to install this module, all menuitems are located under 'Sales' application
    # Data files loaded, in order, on module install/update.
    'data': [
        'security/ir.model.access.csv',
        'security/account_analytic_analysis_security.xml',
        'account_analytic_analysis_view.xml',
        'account_analytic_analysis_cron.xml',
        'res_config_view.xml',
    ],
    'css': [
        'static/src/css/analytic.css'
    ],
    'demo': ['analytic_account_demo.xml'],
    'test': ['test/account_analytic_analysis.yml'],
    'installable': True,
    # Never installed implicitly as a side effect of its dependencies.
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mtnman38/Aggregate | Executables/Aggregate 0.8.7 for Macintosh.app/Contents/Resources/lib/python2.7/email/mime/message.py | 573 | 1286 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Class representing message/* MIME documents."""
__all__ = ['MIMEMessage']
from email import message
from email.mime.nonmultipart import MIMENonMultipart
class MIMEMessage(MIMENonMultipart):
    """MIME document of type message/* wrapping exactly one Message object."""

    def __init__(self, _msg, _subtype='rfc822'):
        """Create a message/* type MIME document.

        _msg must be an instance of Message (or a subclass), otherwise a
        TypeError is raised.  _subtype selects the subtype of the contained
        message; the default "rfc822" is the name mandated by the MIME
        standard, even though RFC 2822 has since superseded RFC 822.
        """
        MIMENonMultipart.__init__(self, 'message', _subtype)
        if isinstance(_msg, message.Message):
            # Delegate to the base Message.attach() so the payload ends up
            # as a one-element list holding the wrapped message.
            message.Message.attach(self, _msg)
            # message/* containers default to message/rfc822.
            self.set_default_type('message/rfc822')
        else:
            raise TypeError('Argument is not an instance of Message')
| gpl-2.0 |
parasite-genomics/Pipelines | HDR.py | 1 | 5656 | #! /usr/bin/env python
# HDR: Python 2 interactive script that reads a chrom/pos variant table,
# groups adjacent variants into clusters (positions on the same chromosome
# within 'pos_diff' of each other), filters clusters by minimum size,
# merges nearby clusters into "megaclusters", and writes four report files.
#
# input parameters
header = raw_input('\nhdr: header (1=Yes, 0=No) ?\n')
pos_diff = raw_input('hdr: distance (clustering condition) ?\n')
cl_size = raw_input('hdr: min cluster size ?\n')
collapse_size = raw_input('hdr: min distance to collapse clusters ?\n\n')
infile = raw_input('hdr: data filename ?\n')
cl_out = raw_input('hdr: cl output filename ?\n')
mcl_out = raw_input('hdr: mcl output filename ?\n')
no_cl_out = raw_input('hdr: no_cl output filename ?\n')
no_mcl_out = raw_input('hdr: no_mcl output file ?\n')
# check for invalid input parameters
e1='\nhdr terminated: input parameter error.\n'
e_num='is not a positive integer.\n'
e_file='is not a valid file name.\n'
#header
# NOTE(review): in Python 2, "except ValueError, TypeError:" binds the
# ValueError instance to the name TypeError -- it does NOT also catch
# TypeError.  The intended form is "except (ValueError, TypeError):".
# The same pattern recurs in the three validations below.
try:
    num = int(header)
except ValueError, TypeError:
    print e1,'\"',header.strip(),'\"',e_num
    raise SystemExit
if (int(header) !=0 and int(header) !=1):
    print e1,'header flag can only be 0 or 1\n'
    raise SystemExit
#poss_diff
try:
    num = int(pos_diff)
except ValueError, TypeError:
    print e1,'\"',pos_diff.strip(),'\"',e_num
    raise SystemExit
if (int(pos_diff) < 0):
    print e1,'\"',pos_diff.strip(),'\"',e_num
    raise SystemExit
#cl_size
try:
    num = int(cl_size)
except ValueError, TypeError:
    print e1,'\"',cl_size.strip(),'\"',e_num
    raise SystemExit
if (int(cl_size) < 0):
    print e1,'\"',cl_size.strip(),'\"',e_num
    raise SystemExit
#collapse_size
try:
    num = int(collapse_size)
except ValueError, TypeError:
    print e1,'\"',collapse_size.strip(),'\"',e_num
    raise SystemExit
if (int(collapse_size) < 0):
    print e1,'\"',collapse_size.strip(),'\"',e_num
    raise SystemExit
#infile
# A "valid" file name here simply means it contains no whitespace.
if(len(infile.split())!=1):
    print e1,'\"',infile.strip(),'\"',e_file
    raise SystemExit
#cl_out
if(len(cl_out.split())!=1):
    print e1,'\"',cl_out.strip(),'\"',e_file
    raise SystemExit
#mcl_out
if(len(mcl_out.split())!=1):
    print e1,'\"',mcl_out.strip(),'\"',e_file
    raise SystemExit
#no_cl_out
if(len(no_cl_out.split())!=1):
    print e1,'\"',no_cl_out.strip(),'\"',e_file
    raise SystemExit
#no_mcl_out
if(len(no_mcl_out.split())!=1):
    print e1,'\"',no_mcl_out.strip(),'\"',e_file
    raise SystemExit
# read data from file
# Parallel lists: chrom[i]/pos[i] are columns 1-2 of data line i; all[i]
# is the whole stripped line.  NOTE(review): 'all' shadows the builtin.
chrom=[]
pos=[]
all=[]
headerline=[]
with open(infile.strip(),'r') as fin:
    read_line = lambda: fin.readline()
    for line in iter(read_line,''):
        # 'header' doubles as state: 1 = header line still expected,
        # then it is set to 2 so only the first line is skipped.
        if int(header)==1:
            headerline.append(line.strip())
            #print '\nhdr: header is',headerline
            header=2
            continue
        if(len(line.split())>1):
            chrom.append(line.split()[0])
            pos.append(line.split()[1])
            all.append(line.strip())
        else:
            print 'hdr warning: line with',len(line.split()),'column(s) ignored'
# NOTE(review): the next line raises IndexError when the input holds no
# usable data lines (all[0] does not exist).
print '\nhdr:',len(pos),'lines','\n','hdr:',len(all[0].split()),'columns (in first line)'
for i in range(len(pos)-1):
    if len(all[i].split()) != len(all[0].split()):
        print 'hdr warning:',len(all[i].split()),'columns in line',i
# identify clusters
# c0[k]/c1[k] are the first/last row indices of cluster k; a new cluster
# starts when the chromosome changes or the gap exceeds pos_diff.
c0=[0]
c1=[]
for i in range(1,len(pos)):
    if chrom[i]!=chrom[i-1] or int(pos[i])-int(pos[i-1])>int(pos_diff):
        c1.append(i-1)
        c0.append(i)
c1.append(len(all)-1)
print 'hdr:',len(c0),'cluster(s)'
# identify clusters of cl_size or larger
# NOTE(review): the loop starts at 1, so the very first cluster (c0[0])
# is never considered for the size filter -- presumably unintended.
cl0=[]
cl1=[]
for i in range(1,len(c0)):
    if int(c1[i])-int(c0[i])+1>=int(cl_size):
        cl1.append(c1[i])
        cl0.append(c0[i])
print 'hdr:',len(cl0),'cluster(s) size',int(cl_size),'or larger'
#output clusters of cl_size or larger
# Each cluster is preceded by a '#chrom size first_pos last_pos' banner.
with open(cl_out.strip(),'w') as fclout:
    for i in range(len(cl0)):
        for j in range(int(cl0[i]),int(cl1[i])+1):
            if j==int(cl0[i]):
                fclout.write( '#' + chrom[int(cl0[i])] + '\t' + str(int(cl1[i]) - int(cl0[i]) + 1) + '\t' + pos[int(cl0[i])] + '\t' + pos[int(cl1[i])] + '\n' )
            fclout.write(all[j]+'\n')
        fclout.write('\n')
# output no-clusters (variants not in any cluster of cl_size or larger)
with open(no_cl_out.strip(),'w') as fnoclout:
    if cl0: #check cl0 is not empty
        for j in range(0,int(cl0[0])):
            fnoclout.write(all[j]+'\n')
        for i in range(len(cl0)-1):
            for j in range(int(cl1[i])+1,int(cl0[i+1])):
                fnoclout.write(all[j]+'\n')
        for j in range(int(cl1[-1])+1,len(pos)):
            fnoclout.write(all[j]+'\n')
# identify megaclusters:
# merged clusters (of cl_size or larger) that are apart collapse_size or less, including the variants between them
# NOTE(review): cl0[0] raises IndexError when no cluster passed the size
# filter -- unlike the 'if cl0' guards used in the output sections.
mcl0=[cl0[0]]
mcl1=[]
for i in range(1,len(cl0)):
    if chrom[int(cl0[i])]!=chrom[int(cl1[i-1])] or int(pos[int(cl0[i])])-int(pos[int(cl1[i-1])])>int(collapse_size):
        mcl1.append(cl1[i-1])
        mcl0.append(cl0[i])
mcl1.append(cl1[-1])
print 'hdr:',len(mcl0),'megacluster(s)\n'
# output megaclusters
#with open(mcl_out.strip(),'w') as fmclout:
with open(mcl_out.strip(),'w') as fmclout:
    for i in range(len(mcl0)):
        for j in range(int(mcl0[i]),int(mcl1[i])+1):
            if j==int(mcl0[i]):
                fmclout.write( '\n#' + chrom[int(mcl0[i])] + '\t' + str(int(mcl1[i]) - int(mcl0[i]) + 1) + '\t' + pos[int(mcl0[i])] + '\t' + pos[int(mcl1[i])] + '\n' )
            fmclout.write(all[j]+'\n')
# output no-megaclusters (variants not in any megacluster)
with open(no_mcl_out.strip(),'w') as fnomclout:
    if mcl0: #check mcl0 is not empty
        for j in range(0,int(mcl0[0])):
            fnomclout.write(all[j]+'\n')
        for i in range(len(mcl0)-1):
            for j in range(int(mcl1[i])+1,int(mcl0[i+1])):
                fnomclout.write(all[j]+'\n')
        for j in range(int(mcl1[-1])+1,len(pos)):
            fnomclout.write(all[j]+'\n')
BrianVermeire/PyFR | pyfr/backends/mic/base.py | 1 | 2224 | # -*- coding: utf-8 -*-
import numpy as np
from pyfr.backends.base import BaseBackend
from pyfr.mpiutil import get_local_rank
class MICBackend(BaseBackend):
    """PyFR backend targeting Intel MIC (Xeon Phi) coprocessors via pymic."""

    name = 'mic'

    def __init__(self, cfg):
        """Select the MIC device, set up the default stream, and register
        the backend's matrix/view types and kernel providers.

        cfg is the PyFR configuration object; the device to use is read
        from the [backend-mic] section.
        """
        super().__init__(cfg)

        import pymic as mic

        # Get the device ID to use
        devid = cfg.get('backend-mic', 'device-id', 'local-rank')

        # Handle the local-rank case (pick a device by this process's
        # MPI rank on the node)
        if devid == 'local-rank':
            devid = str(get_local_rank())

        # Get a handle to the desired device
        self.dev = mic.devices[int(devid)]

        # Default stream
        self.sdflt = self.dev.get_default_stream()

        # Take the alignment requirement to be 64-bytes
        self.alignb = 64

        # Compute the SoA size: number of floating point items per
        # 64-byte aligned group (fpdtype is presumably set by
        # BaseBackend.__init__ -- TODO confirm)
        self.soasz = self.alignb // np.dtype(self.fpdtype).itemsize

        from pyfr.backends.mic import (blasext, cblas, packing, provider,
                                       types)

        # Register our data types
        self.base_matrix_cls = types.MICMatrixBase
        self.const_matrix_cls = types.MICConstMatrix
        self.matrix_cls = types.MICMatrix
        self.matrix_bank_cls = types.MICMatrixBank
        self.matrix_rslice_cls = types.MICMatrixRSlice
        self.queue_cls = types.MICQueue
        self.view_cls = types.MICView
        self.xchg_matrix_cls = types.MICXchgMatrix
        self.xchg_view_cls = types.MICXchgView

        # Kernel provider classes
        kprovcls = [provider.MICPointwiseKernelProvider,
                    blasext.MICBlasExtKernels,
                    packing.MICPackingKernels,
                    cblas.MICCBLASKernels]
        self._providers = [k(self) for k in kprovcls]

        # Pointwise kernels (first provider in the list above)
        self.pointwise = self._providers[0]

    def _malloc_impl(self, nbytes):
        """Allocate nbytes of zero-initialised device memory and return
        the pymic buffer, with the raw device pointer attached as
        buf.dev_ptr.
        """
        stream = self.sdflt

        # Allocate an empty buffer on the device
        buf = stream.allocate_device_memory(nbytes)

        # Attach the raw device pointer
        buf.dev_ptr = stream.translate_device_pointer(buf)

        # Zero the buffer by copying a host-side array of zeros over it;
        # sync() ensures the transfer completes before the buffer is used.
        zeros = np.zeros(nbytes, dtype=np.uint8)
        stream.transfer_host2device(zeros.ctypes.data, buf, nbytes)
        stream.sync()

        return buf
| bsd-3-clause |
nikoonia/gem5v | src/mem/cache/BaseCache.py | 2 | 4044 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2005-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from m5.params import *
from m5.proxy import *
from MemObject import MemObject
from Prefetcher import BasePrefetcher
class BaseCache(MemObject):
    """Parameter declarations for gem5's base cache model.

    Each attribute declares a configurable simulation parameter; the
    C++ cache implementation reads these at instantiation time.
    """
    type = 'BaseCache'

    # Geometry and timing
    assoc = Param.Int("associativity")
    block_size = Param.Int("block size in bytes")
    hit_latency = Param.Cycles("The hit latency for this cache")
    response_latency = Param.Cycles(
        "Additional cache latency for the return path to core on a miss");
    hash_delay = Param.Cycles(1, "time in cycles of hash access")
    max_miss_count = Param.Counter(0,
        "number of misses to handle before calling exit")

    # Miss handling resources
    mshrs = Param.Int("number of MSHRs (max outstanding requests)")
    prioritizeRequests = Param.Bool(False,
        "always service demand misses first")
    repl = Param.Repl(NULL, "replacement policy")
    size = Param.MemorySize("capacity in bytes")
    forward_snoops = Param.Bool(True,
        "forward snoops from mem side to cpu side")
    is_top_level = Param.Bool(False, "Is this cache at the top level (e.g. L1)")
    subblock_size = Param.Int(0,
        "Size of subblock in IIC used for compression")
    tgts_per_mshr = Param.Int("max number of accesses per MSHR")
    trace_addr = Param.Addr(0, "address to trace")
    two_queue = Param.Bool(False,
        "whether the lifo should have two queue replacement")
    write_buffers = Param.Int(8, "number of write buffers")

    # Optional hardware prefetcher
    prefetch_on_access = Param.Bool(False,
        "notify the hardware prefetcher on every access (not just misses)")
    prefetcher = Param.BasePrefetcher(NULL,"Prefetcher attached to cache")

    # Memory-system connectivity
    cpu_side = SlavePort("Port on side closer to CPU")
    mem_side = MasterPort("Port on side closer to MEM")
    addr_ranges = VectorParam.AddrRange([AllMemory], "The address range for the CPU-side port")
    system = Param.System(Parent.any, "System we belong to")
| bsd-3-clause |
tomaaron/raiden | raiden/tests/integration/test_endpointregistry.py | 1 | 1619 | # -*- coding: utf-8 -*-
import pytest
from raiden.utils import make_address, get_contract_path, privatekey_to_address
from raiden.network.discovery import ContractDiscovery
@pytest.mark.timeout(60)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('poll_timeout', [80])
def test_endpointregistry(private_keys, blockchain_services):
    """Registration and re-registration must be reflected by both lookup directions."""
    blockchain = blockchain_services.blockchain_services[0]
    node_address = privatekey_to_address(private_keys[0])

    registry_address = blockchain.deploy_contract(
        'EndpointRegistry',
        get_contract_path('EndpointRegistry.sol'),
    )
    proxy = blockchain.discovery(registry_address)
    discovery = ContractDiscovery(node_address, proxy)

    unknown_address = make_address()

    # Before anything is registered every lookup must fail.
    with pytest.raises(KeyError):
        discovery.get(node_address)

    with pytest.raises(KeyError):
        discovery.get(unknown_address)

    assert discovery.nodeid_by_host_port(('127.0.0.1', 44444)) is None

    # First registration: the forward and reverse mappings must both resolve.
    discovery.register(node_address, '127.0.0.1', 44444)

    assert discovery.nodeid_by_host_port(('127.0.0.1', 44444)) == node_address
    assert discovery.get(node_address) == ('127.0.0.1', 44444)

    # Registering again replaces the previous endpoint.
    discovery.register(node_address, '127.0.0.1', 88888)

    assert discovery.nodeid_by_host_port(('127.0.0.1', 88888)) == node_address
    assert discovery.get(node_address) == ('127.0.0.1', 88888)

    # Unrelated addresses must still be unknown.
    with pytest.raises(KeyError):
        discovery.get(unknown_address)
| mit |
naufraghi/python-xlib | examples/childwin.py | 3 | 3884 | #!/usr/bin/python
#
# examples/childwin.py -- demonstrate child windows.
#
# Copyright (C) 2008 David Bronke <whitelynx@gmail.com>
# Copyright (C) 2002 Peter Liljenberg <petli@ctrl-c.liu.se>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys
import os
# Change path so we find Xlib
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from Xlib import X, display, Xutil
# Application window
# Application window
class Window:
    """Demo window with a draggable child window (Python 2, python-xlib)."""

    def __init__(self, display):
        """Create the main window, its background pattern and the child window."""
        self.d = display

        # Find which screen to open the window on
        self.screen = self.d.screen()

        # background pattern: a tileable pixmap of four circle arcs
        bgsize = 20

        bgpm = self.screen.root.create_pixmap(
            bgsize,
            bgsize,
            self.screen.root_depth
        )
        bggc = self.screen.root.create_gc(
            foreground=self.screen.black_pixel,
            background=self.screen.black_pixel
        )
        bgpm.fill_rectangle(bggc, 0, 0, bgsize, bgsize)

        bggc.change(foreground=self.screen.white_pixel)

        # Four arcs centred on the tile edges so the pattern tiles seamlessly.
        bgpm.arc(bggc, -bgsize / 2, 0, bgsize, bgsize, 0, 360 * 64)
        bgpm.arc(bggc, bgsize / 2, 0, bgsize, bgsize, 0, 360 * 64)
        bgpm.arc(bggc, 0, -bgsize / 2, bgsize, bgsize, 0, 360 * 64)
        bgpm.arc(bggc, 0, bgsize / 2, bgsize, bgsize, 0, 360 * 64)

        # Actual window
        self.window = self.screen.root.create_window(
            100, 100, 400, 300, 0,
            self.screen.root_depth,
            X.InputOutput,
            X.CopyFromParent,

            # special attribute values
            background_pixmap=bgpm,
            event_mask=(
                X.StructureNotifyMask |
                X.ButtonReleaseMask
            ),
            colormap=X.CopyFromParent
        )

        # Set some WM info
        self.WM_DELETE_WINDOW = self.d.intern_atom('WM_DELETE_WINDOW')
        self.WM_PROTOCOLS = self.d.intern_atom('WM_PROTOCOLS')

        self.window.set_wm_name('Xlib example: childwin.py')
        self.window.set_wm_icon_name('childwin.py')
        self.window.set_wm_class('childwin', 'XlibExample')

        # Ask the WM to send us a ClientMessage instead of killing us on close.
        self.window.set_wm_protocols([self.WM_DELETE_WINDOW])
        self.window.set_wm_hints(
            flags=Xutil.StateHint,
            initial_state=Xutil.NormalState
        )

        self.window.set_wm_normal_hints(
            flags=(Xutil.PPosition | Xutil.PSize | Xutil.PMinSize),
            min_width=50,
            min_height=50
        )

        # Map the window, making it visible
        self.window.map()

        # Child window
        (self.childWidth, self.childHeight) = (20, 20)
        self.childWindow = self.window.create_window(
            20, 20, self.childWidth, self.childHeight, 0,
            self.screen.root_depth,
            X.CopyFromParent,
            X.CopyFromParent,

            # special attribute values
            background_pixel=self.screen.white_pixel,
            colormap=X.CopyFromParent,
        )
        self.childWindow.map()

    # Main loop, handling events
    def loop(self):
        """Process X events until the window is destroyed or closed by the WM."""
        current = None
        while 1:
            e = self.d.next_event()

            # Window has been destroyed, quit
            if e.type == X.DestroyNotify:
                sys.exit(0)

            # Button released, add or subtract
            elif e.type == X.ButtonRelease:
                # Button 1: re-centre the child window on the click position.
                if e.detail == 1:
                    print "Moving child window."
                    self.childWindow.configure(
                        x=e.event_x - self.childWidth / 2,
                        y=e.event_y - self.childHeight / 2
                    )
                    self.d.flush()

            # Somebody wants to tell us something
            elif e.type == X.ClientMessage:
                if e.client_type == self.WM_PROTOCOLS:
                    fmt, data = e.data
                    # WM asked us to close (WM_DELETE_WINDOW protocol).
                    if fmt == 32 and data[0] == self.WM_DELETE_WINDOW:
                        sys.exit(0)
# Script entry point: open the demo window and run its event loop until closed.
if __name__ == '__main__':
    Window(display.Display()).loop()
| gpl-2.0 |
vrenaville/ngo-addons-backport | addons/edi/models/__init__.py | 442 | 1116 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import edi
import res_partner
import res_company
import res_currency
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
btat/Booktype | lib/booktype/apps/edit/views.py | 1 | 8544 | # -*- coding: utf-8 -*-
import datetime
from django.shortcuts import render
from django.http import HttpResponse
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
from django.contrib.auth.decorators import login_required
from django.db import transaction
from django.conf import settings
from booki.utils import log
from booki.editor import models
def getTOCForBook(version):
    """Flatten a book version's table of contents into plain tuples.

    Chapters become 5-tuples ``(id, title, url_title, typeof, status_id)``;
    sections (TOC items without a chapter) become 4-tuples
    ``('s<id>', name, name, typeof)``.
    """
    entries = []
    for item in version.get_toc():
        chapter = item.chapter
        if chapter:
            entries.append(
                (chapter.id,
                 chapter.title,
                 chapter.url_title,
                 item.typeof,
                 chapter.status.id))
        else:
            # Sections have no chapter record; synthesise an 's<id>' key.
            entries.append(('s%s' % item.id, item.name, item.name, item.typeof))
    return entries
@login_required
@transaction.commit_manually
def upload_attachment(request, bookid, version=None):
    """Handle an AJAX file upload and store it as an Attachment on a book version.

    The upload is wrapped in a manually-managed transaction: any failure rolls
    the transaction back, success commits it.  The JSON response is currently a
    hard-coded placeholder (jQuery-File-Upload format), not real file metadata.
    """
    import json
    import datetime
    import os.path

    from booki.utils.misc import bookiSlugify

    try:
        book = models.Book.objects.get(url_title__iexact=bookid)
    except models.Book.DoesNotExist:
        # NOTE(review): `pages` is not imported in this module, so this path
        # raises NameError instead of rendering the error page -- confirm.
        return pages.ErrorPage(request, "errors/book_does_not_exist.html", {"book_name": bookid})

    book_version = book.get_version(version)
    stat = models.BookStatus.objects.filter(book = book)[0]

    operationResult = True

    try:
        fileData = request.FILES['files[]']
        att = models.Attachment(version = book_version,
                                created = datetime.datetime.now(),
                                book = book,
                                status = stat)
        att.save()

        # Slugify only the base name; keep the original extension.
        attName, attExt = os.path.splitext(fileData.name)
        att.attachment.save('{}{}'.format(bookiSlugify(attName), attExt), fileData, save = False)
        att.save()
        # TODO: record this upload in the book activity log.
    except:
        # Fixed: this previously assigned a misspelled name
        # ("oprerationResult"), silently creating a new variable instead of
        # flagging the failure.  Deliberate best-effort: any error only rolls
        # back the transaction.
        operationResult = False
        transaction.rollback()
    else:
        transaction.commit()

    # Placeholder response expected by the jQuery-File-Upload widget.
    response_data = {"files":[{"url":"http://127.0.0.1/",
                               "thumbnail_url":"http://127.0.0.1/",
                               "name":"boot.png",
                               "type":"image/png",
                               "size":172728,
                               "delete_url":"",
                               "delete_type":"DELETE"}]}

    if "application/json" in request.META['HTTP_ACCEPT']:
        return HttpResponse(json.dumps(response_data), mimetype="application/json")
    else:
        return HttpResponse(json.dumps(response_data), mimetype="text/html")
@login_required
@transaction.commit_manually
def upload_cover(request, bookid, version=None):
    """Handle an AJAX cover-image upload and store it as a BookCover.

    Runs inside a manually-managed transaction: any failure rolls back,
    success commits.  The JSON response is a hard-coded placeholder in the
    jQuery-File-Upload format.
    """
    import json
    import datetime

    try:
        book = models.Book.objects.get(url_title__iexact=bookid)
    except models.Book.DoesNotExist:
        # NOTE(review): `pages` is not imported in this module, so this path
        # raises NameError instead of rendering the error page -- confirm.
        return pages.ErrorPage(request, "errors/book_does_not_exist.html", {"book_name": bookid})

    # NOTE(review): book_version is unused below; kept in case get_version()
    # has side effects -- confirm and drop if it is a pure getter.
    book_version = book.get_version(version)

    operationResult = True

    try:
        fileData = request.FILES['files[]']
        title = request.POST.get('title', '')

        # Cover id: SHA1 over file name, title and upload time.
        import hashlib
        h = hashlib.sha1()
        h.update(fileData.name)
        h.update(title)
        h.update(str(datetime.datetime.now()))

        license = models.License.objects.get(abbrevation=request.POST.get('license', ''))

        # NOTE(review): `unidecode` is never imported in this module, so this
        # always raises NameError and the filename falls back to '' -- either
        # import unidecode or drop the transliteration.
        try:
            filename = unidecode.unidecode(fileData.name)
        except:
            filename = ''

        cov = models.BookCover(book = book,
                               user = request.user,
                               cid = h.hexdigest(),
                               title = title,
                               filename = filename[:250],
                               width = 0,
                               height = 0,
                               unit = request.POST.get('unit', 'mm'),
                               booksize = request.POST.get('booksize', ''),
                               cover_type = request.POST.get('type', ''),
                               creator = request.POST.get('creator', '')[:40],
                               license = license,
                               notes = request.POST.get('notes', '')[:500],
                               approved = False,
                               is_book = False,
                               is_ebook = True,
                               is_pdf = False,
                               created = datetime.datetime.now())
        cov.save()

        cov.attachment.save(fileData.name, fileData, save = False)
        cov.save()
        # TODO: record this upload in the book activity log.
    except:
        # Fixed: this previously assigned a misspelled name
        # ("oprerationResult"), silently creating a new variable instead of
        # flagging the failure.  Deliberate best-effort: errors only roll
        # back the transaction.
        operationResult = False
        transaction.rollback()
    else:
        transaction.commit()

    # Placeholder response expected by the jQuery-File-Upload widget.
    response_data = {"files":[{"url":"http://127.0.0.1/",
                               "thumbnail_url":"http://127.0.0.1/",
                               "name":"boot.png",
                               "type":"image/png",
                               "size":172728,
                               "delete_url":"",
                               "delete_type":"DELETE"}]}

    if "application/json" in request.META['HTTP_ACCEPT']:
        return HttpResponse(json.dumps(response_data), mimetype="application/json")
    else:
        return HttpResponse(json.dumps(response_data), mimetype="text/html")
def cover(request, bookid, cid, fname = None, version=None):
    """Serve a cover image, either full-size or as a 300x200 preview.

    ``?preview=1`` returns a thumbnail rendered with PIL; unsupported or
    unreadable images fall back to a per-extension placeholder, then to a
    generic error image.  ``fname`` and ``version`` are currently unused.
    """
    # NOTE(review): `static` is imported but never used in this function.
    from django.views import static

    try:
        book = models.Book.objects.get(url_title__iexact=bookid)
    except models.Book.DoesNotExist:
        # NOTE(review): `pages` is not imported in this module -- this path
        # raises NameError instead of rendering the error page; confirm.
        return pages.ErrorPage(request, "errors/book_does_not_exist.html", {"book_name": bookid})

    try:
        cover = models.BookCover.objects.get(cid = cid)
    except models.BookCover.DoesNotExist:
        return HttpResponse(status=500)

    document_path = cover.attachment.path

    # extenstion -> content type, normalising tif/jpg aliases first
    import mimetypes
    mimetypes.init()

    extension = cover.filename.split('.')[-1].lower()

    if extension == 'tif':
        extension = 'tiff'

    if extension == 'jpg':
        extension = 'jpeg'

    content_type = mimetypes.types_map.get('.'+extension, 'binary/octet-stream')

    if request.GET.get('preview', '') == '1':
        # Pillow exposes Image at package level; old PIL as a top-level module.
        try:
            from PIL import Image
        except ImportError:
            import Image

        try:
            # Formats PIL cannot thumbnail go straight to the placeholder.
            if extension.lower() in ['pdf', 'psd', 'svg']:
                raise

            im = Image.open(cover.attachment.name)
            im.thumbnail((300, 200), Image.ANTIALIAS)
        except:
            # First fall back to a per-extension placeholder image ...
            try:
                im = Image.open('%s/editor/images/booktype-cover-%s.png' % (settings.STATIC_ROOT, extension.lower()))
                extension = 'png'
                content_type = 'image/png'
            except:
                # Not just IOError but anything else
                im = Image.open('%s/editor/images/booktype-cover-error.png' % settings.STATIC_ROOT)
                extension = 'png'
                content_type = 'image/png'

        response = HttpResponse(content_type=content_type)

        # PIL wants the save format spelled 'JPEG'; unknown types default to jpeg.
        if extension.lower() in ['jpg', 'jpeg', 'png', 'gif', 'tiff', 'bmp', 'tif']:
            if extension.upper() == 'JPG': extension = 'JPEG'
        else:
            extension = 'jpeg'

        im.save(response, extension.upper())

        return response

    # Full-size: stream the raw attachment bytes.
    try:
        data = open(document_path, 'rb').read()
    except IOError:
        return HttpResponse(status=500)

    response = HttpResponse(data, content_type=content_type)
    return response
@login_required
def edit(request, bookid):
    """Render the main book editor page for the book with the given url title.

    Raises Book.DoesNotExist if the book is unknown (no friendly error page
    here, unlike the upload views).
    """
    book = models.Book.objects.get(url_title__iexact=bookid)

    # Fetch the current (unnamed) version once and build the TOC from it.
    # Fixed: get_version(None) was previously called twice and the second
    # result immediately overwritten by the literal below (dead store).
    toc = getTOCForBook(book.get_version(None))

    # The template only receives this label, not the version object.
    book_version = '1.0'

    resp = render(request, 'edit/book_edit.html', {'request': request,
                                                   'chapters': toc,
                                                   'book': book,
                                                   'book_version': book_version})

    return resp
| agpl-3.0 |
isyippee/nova | nova/api/openstack/compute/legacy_v2/contrib/flavorextradata.py | 79 | 2440 | # Copyright 2012 OpenStack Foundation
# Copyright 2011 Canonical Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Flavor extra data extension
OpenStack API version 1.1 lists "name", "ram", "disk", "vcpus" as flavor
attributes. This extension adds to that list:
- OS-FLV-EXT-DATA:ephemeral
"""
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
authorize = extensions.soft_extension_authorizer('compute', 'flavorextradata')


class FlavorextradataController(wsgi.Controller):
    """Controller extension that decorates flavor responses with ephemeral disk size."""

    def _extend_flavors(self, req, flavors):
        # Annotate every flavor dict with "<alias>:ephemeral" from its DB record.
        key = "%s:ephemeral" % Flavorextradata.alias
        for entry in flavors:
            db_flavor = req.get_db_flavor(entry['id'])
            entry[key] = db_flavor['ephemeral_gb']

    def _show(self, req, resp_obj):
        # Extend only when authorized and when a single flavor is present.
        if authorize(req.environ['nova.context']) and 'flavor' in resp_obj.obj:
            self._extend_flavors(req, [resp_obj.obj['flavor']])

    @wsgi.extends
    def show(self, req, resp_obj, id):
        return self._show(req, resp_obj)

    @wsgi.extends(action='create')
    def create(self, req, resp_obj, body):
        return self._show(req, resp_obj)

    @wsgi.extends
    def detail(self, req, resp_obj):
        # The list response carries all flavors under the 'flavors' key.
        if authorize(req.environ['nova.context']):
            self._extend_flavors(req, list(resp_obj.obj['flavors']))
class Flavorextradata(extensions.ExtensionDescriptor):
    """Provide additional data for flavors."""
    # The docstring above is intentionally unchanged: extension descriptor
    # docstrings are surfaced as the extension description through the API.

    name = "FlavorExtraData"
    alias = "OS-FLV-EXT-DATA"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "flavor_extra_data/api/v1.1")
    updated = "2011-09-14T00:00:00Z"

    def get_controller_extensions(self):
        # Attach our controller to the 'flavors' resource.
        extension = extensions.ControllerExtension(
            self, 'flavors', FlavorextradataController())
        return [extension]
| apache-2.0 |
levkar/odoo | addons/hr_gamification/models/gamification.py | 53 | 1560 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import ValidationError
class GamificationBadgeUser(models.Model):
    """User having received a badge"""

    _inherit = 'gamification.badge.user'

    # Optional link to the HR employee record behind the badge holder.
    employee_id = fields.Many2one('hr.employee', string='Employee')

    @api.constrains('employee_id')
    def _check_employee_related_user(self):
        # The selected employee must be one of the linked user's employees.
        for record in self:
            valid_employees = record.user_id.employee_ids
            if record.employee_id not in valid_employees:
                raise ValidationError(_('The selected employee does not correspond to the selected user.'))
class GamificationBadge(models.Model):
    """Badge extension: track and display employee grants."""

    _inherit = 'gamification.badge'

    granted_employees_count = fields.Integer(compute="_compute_granted_employees_count")

    @api.depends('owner_ids.employee_id')
    def _compute_granted_employees_count(self):
        # Count only grants that are tied to an employee record.
        BadgeUser = self.env['gamification.badge.user']
        for record in self:
            domain = [
                ('badge_id', '=', record.id),
                ('employee_id', '!=', False)
            ]
            record.granted_employees_count = BadgeUser.search_count(domain)

    @api.multi
    def get_granted_employees(self):
        """Open an employee view filtered to the holders of these badges."""
        holder_ids = self.mapped('owner_ids.employee_id').ids
        return {
            'type': 'ir.actions.act_window',
            'name': 'Granted Employees',
            'view_mode': 'kanban,tree,form',
            'view_type': 'form',
            'res_model': 'hr.employee',
            'domain': [('id', 'in', holder_ids)]
        }
| agpl-3.0 |
oliciv/youtube-dl | youtube_dl/extractor/niconico.py | 34 | 10884 | # encoding: utf-8
from __future__ import unicode_literals
import re
import json
import datetime
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_request,
compat_urlparse,
)
from ..utils import (
encode_dict,
ExtractorError,
int_or_none,
parse_duration,
parse_iso8601,
xpath_text,
determine_ext,
)
class NiconicoIE(InfoExtractor):
    """Extractor for single videos on nicovideo.jp (Nico Nico Douga)."""

    IE_NAME = 'niconico'
    IE_DESC = 'ニコニコ動画'

    _TESTS = [{
        'url': 'http://www.nicovideo.jp/watch/sm22312215',
        'md5': 'd1a75c0823e2f629128c43e1212760f9',
        'info_dict': {
            'id': 'sm22312215',
            'ext': 'mp4',
            'title': 'Big Buck Bunny',
            'uploader': 'takuya0301',
            'uploader_id': '2698420',
            'upload_date': '20131123',
            'timestamp': 1385182762,
            'description': '(c) copyright 2008, Blender Foundation / www.bigbuckbunny.org',
            'duration': 33,
        },
    }, {
        # File downloaded with and without credentials are different, so omit
        # the md5 field
        'url': 'http://www.nicovideo.jp/watch/nm14296458',
        'info_dict': {
            'id': 'nm14296458',
            'ext': 'swf',
            'title': '【鏡音リン】Dance on media【オリジナル】take2!',
            'description': 'md5:689f066d74610b3b22e0f1739add0f58',
            'uploader': 'りょうた',
            'uploader_id': '18822557',
            'upload_date': '20110429',
            'timestamp': 1304065916,
            'duration': 209,
        },
    }, {
        # 'video exists but is marked as "deleted"
        # md5 is unstable
        'url': 'http://www.nicovideo.jp/watch/sm10000',
        'info_dict': {
            'id': 'sm10000',
            'ext': 'unknown_video',
            'description': 'deleted',
            'title': 'ドラえもんエターナル第3話「決戦第3新東京市」<前編>',
            'upload_date': '20071224',
            'timestamp': 1198527840,  # timestamp field has different value if logged in
            'duration': 304,
        },
    }, {
        'url': 'http://www.nicovideo.jp/watch/so22543406',
        'info_dict': {
            'id': '1388129933',
            'ext': 'mp4',
            'title': '【第1回】RADIOアニメロミックス ラブライブ!~のぞえりRadio Garden~',
            'description': 'md5:b27d224bb0ff53d3c8269e9f8b561cf1',
            'timestamp': 1388851200,
            'upload_date': '20140104',
            'uploader': 'アニメロチャンネル',
            'uploader_id': '312',
        }
    }]

    _VALID_URL = r'https?://(?:www\.|secure\.)?nicovideo\.jp/watch/(?P<id>(?:[a-z]{2})?[0-9]+)'
    _NETRC_MACHINE = 'niconico'
    # Determine whether the downloader used authentication to download video
    _AUTHENTICATED = False

    def _real_initialize(self):
        """Log in before any extraction, if credentials were supplied."""
        self._login()

    def _login(self):
        """POST the login form; set _AUTHENTICATED on success.

        Returns True when no credentials were given or login succeeded,
        False when the login page reported an error.
        """
        (username, password) = self._get_login_info()
        # No authentication to be performed
        if not username:
            return True

        # Log in
        login_form_strs = {
            'mail': username,
            'password': password,
        }
        login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('utf-8')
        request = compat_urllib_request.Request(
            'https://secure.nicovideo.jp/secure/login', login_data)
        login_results = self._download_webpage(
            request, None, note='Logging in', errnote='Unable to log in')
        # The login page shows this header only when credentials were rejected.
        if re.search(r'(?i)<h1 class="mb8p4">Log in error</h1>', login_results) is not None:
            self._downloader.report_warning('unable to log in: bad username or password')
            return False
        # Successful login
        self._AUTHENTICATED = True
        return True

    def _real_extract(self, url):
        """Extract the real video URL and metadata for a watch-page URL."""
        video_id = self._match_id(url)

        # Get video webpage. We are not actually interested in it for normal
        # cases, but need the cookies in order to be able to download the
        # info webpage
        webpage, handle = self._download_webpage_handle(
            'http://www.nicovideo.jp/watch/' + video_id, video_id)
        # Channel videos ('so...') redirect to a numeric id; re-read it.
        if video_id.startswith('so'):
            video_id = self._match_id(handle.geturl())

        video_info = self._download_xml(
            'http://ext.nicovideo.jp/api/getthumbinfo/' + video_id, video_id,
            note='Downloading video info page')

        if self._AUTHENTICATED:
            # Get flv info
            flv_info_webpage = self._download_webpage(
                'http://flapi.nicovideo.jp/api/getflv/' + video_id + '?as3=1',
                video_id, 'Downloading flv info')
        else:
            # Get external player info
            ext_player_info = self._download_webpage(
                'http://ext.nicovideo.jp/thumb_watch/' + video_id, video_id)
            thumb_play_key = self._search_regex(
                r'\'thumbPlayKey\'\s*:\s*\'(.*?)\'', ext_player_info, 'thumbPlayKey')

            # Get flv info
            flv_info_data = compat_urllib_parse.urlencode({
                'k': thumb_play_key,
                'v': video_id
            })
            flv_info_request = compat_urllib_request.Request(
                'http://ext.nicovideo.jp/thumb_watch', flv_info_data,
                {'Content-Type': 'application/x-www-form-urlencoded'})
            flv_info_webpage = self._download_webpage(
                flv_info_request, video_id,
                note='Downloading flv info', errnote='Unable to download flv info')

        # The flv info endpoint answers with a urlencoded query string.
        flv_info = compat_urlparse.parse_qs(flv_info_webpage)
        if 'url' not in flv_info:
            if 'deleted' in flv_info:
                raise ExtractorError('The video has been deleted.',
                                     expected=True)
            else:
                raise ExtractorError('Unable to find video URL')

        video_real_url = flv_info['url'][0]

        # Start extracting information
        # Title: API XML first, then OpenGraph, then the page header markup.
        title = xpath_text(video_info, './/title')
        if not title:
            title = self._og_search_title(webpage, default=None)
        if not title:
            title = self._html_search_regex(
                r'<span[^>]+class="videoHeaderTitle"[^>]*>([^<]+)</span>',
                webpage, 'video title')

        # Embedded JSON blob with extra details (may be absent).
        watch_api_data_string = self._html_search_regex(
            r'<div[^>]+id="watchAPIDataContainer"[^>]+>([^<]+)</div>',
            webpage, 'watch api data', default=None)
        watch_api_data = self._parse_json(watch_api_data_string, video_id) if watch_api_data_string else {}
        video_detail = watch_api_data.get('videoDetail', {})

        extension = xpath_text(video_info, './/movie_type')
        if not extension:
            extension = determine_ext(video_real_url)

        thumbnail = (
            xpath_text(video_info, './/thumbnail_url') or
            self._html_search_meta('image', webpage, 'thumbnail', default=None) or
            video_detail.get('thumbnail'))

        description = xpath_text(video_info, './/description')

        # Timestamp fallbacks: API XML, page metadata, then the JSON blob
        # (postedAt is local JST, hence the +9h timezone).
        timestamp = parse_iso8601(xpath_text(video_info, './/first_retrieve'))
        if not timestamp:
            match = self._html_search_meta('datePublished', webpage, 'date published', default=None)
            if match:
                timestamp = parse_iso8601(match.replace('+', ':00+'))
        if not timestamp and video_detail.get('postedAt'):
            timestamp = parse_iso8601(
                video_detail['postedAt'].replace('/', '-'),
                delimiter=' ', timezone=datetime.timedelta(hours=9))

        view_count = int_or_none(xpath_text(video_info, './/view_counter'))
        if not view_count:
            match = self._html_search_regex(
                r'>Views: <strong[^>]*>([^<]+)</strong>',
                webpage, 'view count', default=None)
            if match:
                view_count = int_or_none(match.replace(',', ''))
        view_count = view_count or video_detail.get('viewCount')

        comment_count = int_or_none(xpath_text(video_info, './/comment_num'))
        if not comment_count:
            match = self._html_search_regex(
                r'>Comments: <strong[^>]*>([^<]+)</strong>',
                webpage, 'comment count', default=None)
            if match:
                comment_count = int_or_none(match.replace(',', ''))
        comment_count = comment_count or video_detail.get('commentCount')

        duration = (parse_duration(
            xpath_text(video_info, './/length') or
            self._html_search_meta(
                'video:duration', webpage, 'video duration', default=None)) or
            video_detail.get('length'))

        webpage_url = xpath_text(video_info, './/watch_url') or url

        # Uploader: channel fields take precedence over user fields.
        if video_info.find('.//ch_id') is not None:
            uploader_id = video_info.find('.//ch_id').text
            uploader = video_info.find('.//ch_name').text
        elif video_info.find('.//user_id') is not None:
            uploader_id = video_info.find('.//user_id').text
            uploader = video_info.find('.//user_nickname').text
        else:
            uploader_id = uploader = None

        return {
            'id': video_id,
            'url': video_real_url,
            'title': title,
            'ext': extension,
            # URLs ending in 'low' are the throttled "economy" variant.
            'format_id': 'economy' if video_real_url.endswith('low') else 'normal',
            'thumbnail': thumbnail,
            'description': description,
            'uploader': uploader,
            'timestamp': timestamp,
            'uploader_id': uploader_id,
            'view_count': view_count,
            'comment_count': comment_count,
            'duration': duration,
            'webpage_url': webpage_url,
        }
class NiconicoPlaylistIE(InfoExtractor):
    """Extractor for nicovideo.jp 'mylist' playlists."""

    _VALID_URL = r'https?://www\.nicovideo\.jp/mylist/(?P<id>\d+)'

    _TEST = {
        'url': 'http://www.nicovideo.jp/mylist/27411728',
        'info_dict': {
            'id': '27411728',
            'title': 'AKB48のオールナイトニッポン',
        },
        'playlist_mincount': 225,
    }

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)

        # The playlist items are preloaded as a JSON array in the page script.
        raw_entries = json.loads(self._search_regex(
            r'Mylist\.preload\(\d+, (\[.*\])\);', webpage, 'entries'))

        def make_entry(item):
            # Defer per-video extraction to NiconicoIE via a url result.
            return {
                '_type': 'url',
                'ie_key': NiconicoIE.ie_key(),
                'url': ('http://www.nicovideo.jp/watch/%s' %
                        item['item_data']['video_id']),
            }

        return {
            '_type': 'playlist',
            'title': self._search_regex(r'\s+name: "(.*?)"', webpage, 'title'),
            'id': playlist_id,
            'entries': [make_entry(item) for item in raw_entries],
        }
| unlicense |
sheepray/volatility | volatility/debug.py | 54 | 2718 | # Volatility
#
# Authors:
# Michael Cohen <scudette@users.sourceforge.net>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
""" General debugging framework """
import pdb
import sys
import inspect
import logging
import volatility.conf
# Global configuration object shared by all Volatility modules; -d/--debug
# may be given repeatedly ('count') to raise the verbosity.
config = volatility.conf.ConfObject()

config.add_option("DEBUG", short_option = 'd', default = 0,
                  cache_invalidator = False,
                  action = 'count', help = "Debug volatility")

# Largest debug value used + 1
MAX_DEBUG = 3
def setup(level = 0):
    """Configure root logging and register the DEBUG1..DEBUG8 sub-levels.

    A higher *level* lowers the root logger threshold, enabling the custom
    verbosity levels that sit just below the standard DEBUG level.
    """
    logging.basicConfig(format = "%(levelname)-8s: %(name)-20s: %(message)s")
    logging.getLogger('').setLevel(logging.DEBUG + 1 - level)

    # DEBUG1 (= DEBUG - 1) through DEBUG8 (= DEBUG - 8).
    for offset in range(1, 9):
        logging.addLevelName(logging.DEBUG - offset, "DEBUG" + str(offset))
def debug(msg, level = 1):
    """Log *msg* at the custom DEBUG<level> verbosity (DEBUG1 by default)."""
    log(msg, logging.DEBUG + 1 - level)


def info(msg):
    """Log *msg* at the INFO level."""
    log(msg, logging.INFO)


def warning(msg):
    """Log *msg* at the WARNING level."""
    log(msg, logging.WARNING)


def _fatal(msg, loglevel):
    # Shared implementation: log, then abort with exit status 1.
    log(msg, loglevel)
    sys.exit(1)


def error(msg):
    """Log *msg* at ERROR severity and terminate the process."""
    _fatal(msg, logging.ERROR)


def critical(msg):
    """Log *msg* at CRITICAL severity and terminate the process."""
    _fatal(msg, logging.CRITICAL)
def log(msg, level):
    """Log *msg* at *level*, attributing it to the calling module.

    Walks up the call stack until it leaves this module so the log record is
    tagged with the caller's module name rather than 'volatility.debug'.
    """
    modname = "volatility.py"
    try:
        frm = inspect.currentframe()
        modname = "volatility.debug"
        # Skip frames that belong to this module (debug(), info(), ... wrappers).
        while modname == "volatility.debug":
            frm = frm.f_back
            mod = inspect.getmodule(frm)
            modname = mod.__name__
    except AttributeError:
        # mod can be None for exec'd/builtin frames; fall back to the default name.
        pass
    finally:
        # Break the frame reference cycle (see the inspect module docs).
        del frm
    _log(msg, modname, level)
def _log(msg, facility, loglevel):
"""Outputs a debugging message"""
logger = logging.getLogger(facility)
logger.log(loglevel, msg)
def b(level = 1):
    """Enters the debugger at the call point"""
    # Break only when the -d count reaches the requested level.
    if config.DEBUG >= level:
        pdb.set_trace()

# Backwards-compatible alias.
trace = b
def post_mortem(level = 1):
    """Provides a command line interface to python after an exception's occurred"""
    # Must be called from an active `except` block (uses sys.exc_info()).
    if config.DEBUG >= level:
        pdb.post_mortem()
| gpl-2.0 |
fran-penedo/dreal | benchmarks/network/water/water-triple-sat.py | 11 | 6919 |
from gen import *
##########
# shared #
##########
flow_var[0] = """
(declare-fun tau () Real)
(declare-fun x1 () Real)
(declare-fun x2 () Real)
(declare-fun x3 () Real)
"""
flow_dec[0] = """
(define-ode flow_1 ((= d/dt[x1] (/ (- 5 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (^ x1 0.5))) 2))
(= d/dt[x2] (/ (+ 3 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x1 0.5) (^ x2 0.5)))) 4))
(= d/dt[x3] (/ (+ 4 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x2 0.5) (^ x3 0.5)))) 3))
(= d/dt[tau] 1)))
(define-ode flow_2 ((= d/dt[x1] (/ (- 5 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (^ x1 0.5))) 2))
(= d/dt[x2] (/ (+ 3 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x1 0.5) (^ x2 0.5)))) 4))
(= d/dt[x3] (/ (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x2 0.5) (^ x3 0.5))) 3))
(= d/dt[tau] 1)))
(define-ode flow_3 ((= d/dt[x1] (/ (- 5 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (^ x1 0.5))) 2))
(= d/dt[x2] (/ (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x1 0.5) (^ x2 0.5))) 4))
(= d/dt[x3] (/ (+ 4 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x2 0.5) (^ x3 0.5)))) 3))
(= d/dt[tau] 1)))
(define-ode flow_4 ((= d/dt[x1] (/ (- 5 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (^ x1 0.5))) 2))
(= d/dt[x2] (/ (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x1 0.5) (^ x2 0.5))) 4))
(= d/dt[x3] (/ (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x2 0.5) (^ x3 0.5))) 3))
(= d/dt[tau] 1)))
(define-ode flow_5 ((= d/dt[x1] (/ (* (* -0.5 (^ (* 2 9.80665) 0.5)) (^ x1 0.5)) 2))
(= d/dt[x2] (/ (+ 3 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x1 0.5) (^ x2 0.5)))) 4))
(= d/dt[x3] (/ (+ 4 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x2 0.5) (^ x3 0.5)))) 3))
(= d/dt[tau] 1)))
(define-ode flow_6 ((= d/dt[x1] (/ (* (* -0.5 (^ (* 2 9.80665) 0.5)) (^ x1 0.5)) 2))
(= d/dt[x2] (/ (+ 3 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x1 0.5) (^ x2 0.5)))) 4))
(= d/dt[x3] (/ (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x2 0.5) (^ x3 0.5))) 3))
(= d/dt[tau] 1)))
(define-ode flow_7 ((= d/dt[x1] (/ (* (* -0.5 (^ (* 2 9.80665) 0.5)) (^ x1 0.5)) 2))
(= d/dt[x2] (/ (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x1 0.5) (^ x2 0.5))) 4))
(= d/dt[x3] (/ (+ 4 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x2 0.5) (^ x3 0.5)))) 3))
(= d/dt[tau] 1)))
(define-ode flow_8 ((= d/dt[x1] (/ (* (* -0.5 (^ (* 2 9.80665) 0.5)) (^ x1 0.5)) 2))
(= d/dt[x2] (/ (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x1 0.5) (^ x2 0.5))) 4))
(= d/dt[x3] (/ (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x2 0.5) (^ x3 0.5))) 3))
(= d/dt[tau] 1)))
"""
state_dec[0] = """
(declare-fun time_{0} () Real)
(declare-fun tau_{0}_0 () Real)
(declare-fun tau_{0}_t () Real)
(declare-fun mode1_{0} () Bool)
(declare-fun x1_{0}_0 () Real)
(declare-fun x1_{0}_t () Real)
(declare-fun mode2_{0} () Bool)
(declare-fun x2_{0}_0 () Real)
(declare-fun x2_{0}_t () Real)
(declare-fun mode3_{0} () Bool)
(declare-fun x3_{0}_0 () Real)
(declare-fun x3_{0}_t () Real)
"""
state_val[0] = """
(assert (<= 0 time_{0})) (assert (<= time_{0} 1))
(assert (<= 0 tau_{0}_0)) (assert (<= tau_{0}_0 1))
(assert (<= 0 tau_{0}_t)) (assert (<= tau_{0}_t 1))
(assert (<= 0 x1_{0}_0)) (assert (<= x1_{0}_0 10))
(assert (<= 0 x1_{0}_t)) (assert (<= x1_{0}_t 10))
(assert (<= 0 x2_{0}_0)) (assert (<= x2_{0}_0 10))
(assert (<= 0 x2_{0}_t)) (assert (<= x2_{0}_t 10))
(assert (<= 0 x3_{0}_0)) (assert (<= x3_{0}_0 10))
(assert (<= 0 x3_{0}_t)) (assert (<= x3_{0}_t 10))
"""
cont_cond[0] = ["""
(assert (and (>= tau_{0}_0 0) (<= tau_{0}_0 1)
(>= tau_{0}_t 0) (<= tau_{0}_t 1)
(forall_t 1 [0 time_{0}] (>= tau_{0}_t 0))
(forall_t 2 [0 time_{0}] (<= tau_{0}_t 1))))
(assert (or (and (= mode1_{0} true) (= mode2_{0} true) (= mode3_{0} true)
(= [x1_{0}_t x2_{0}_t x3_{0}_t tau_{0}_t]
(integral 0. time_{0} [x1_{0}_0 x2_{0}_0 x3_{0}_0 tau_{0}_0] flow_1)))
(and (= mode1_{0} true) (= mode2_{0} true) (= mode3_{0} false)
(= [x1_{0}_t x2_{0}_t x3_{0}_t tau_{0}_t]
(integral 0. time_{0} [x1_{0}_0 x2_{0}_0 x3_{0}_0 tau_{0}_0] flow_2)))
(and (= mode1_{0} true) (= mode2_{0} false) (= mode3_{0} true)
(= [x1_{0}_t x2_{0}_t x3_{0}_t tau_{0}_t]
(integral 0. time_{0} [x1_{0}_0 x2_{0}_0 x3_{0}_0 tau_{0}_0] flow_3)))
(and (= mode1_{0} true) (= mode2_{0} false) (= mode3_{0} false)
(= [x1_{0}_t x2_{0}_t x3_{0}_t tau_{0}_t]
(integral 0. time_{0} [x1_{0}_0 x2_{0}_0 x3_{0}_0 tau_{0}_0] flow_4)))
(and (= mode1_{0} false) (= mode2_{0} true) (= mode3_{0} true)
(= [x1_{0}_t x2_{0}_t x3_{0}_t tau_{0}_t]
(integral 0. time_{0} [x1_{0}_0 x2_{0}_0 x3_{0}_0 tau_{0}_0] flow_5)))
(and (= mode1_{0} false) (= mode2_{0} true) (= mode3_{0} false)
(= [x1_{0}_t x2_{0}_t x3_{0}_t tau_{0}_t]
(integral 0. time_{0} [x1_{0}_0 x2_{0}_0 x3_{0}_0 tau_{0}_0] flow_6)))
(and (= mode1_{0} false) (= mode2_{0} false) (= mode3_{0} true)
(= [x1_{0}_t x2_{0}_t x3_{0}_t tau_{0}_t]
(integral 0. time_{0} [x1_{0}_0 x2_{0}_0 x3_{0}_0 tau_{0}_0] flow_7)))
(and (= mode1_{0} false) (= mode2_{0} false) (= mode3_{0} false)
(= [x1_{0}_t x2_{0}_t x3_{0}_t tau_{0}_t]
(integral 0. time_{0} [x1_{0}_0 x2_{0}_0 x3_{0}_0 tau_{0}_0] flow_8)))))"""]
# Transition (jump) conditions between flow modes: when the clock tau
# reaches 1 it resets, each x_i is carried over unchanged, and mode_i
# flips depending on whether x_i crossed the threshold 5.
jump_cond[0] = ["""
(assert (and (= tau_{0}_t 1) (= tau_{1}_0 0)))
(assert (and (= x1_{1}_0 x1_{0}_t)))
(assert (or (and (< x1_{0}_t 5) (= mode1_{1} true))
(and (>= x1_{0}_t 5) (= mode1_{1} false))))
(assert (and (= x2_{1}_0 x2_{0}_t)))
(assert (or (and (< x2_{0}_t 5) (= mode2_{1} true))
(and (>= x2_{0}_t 5) (= mode2_{1} false))))
(assert (and (= x3_{1}_0 x3_{0}_t)))
(assert (or (and (< x3_{0}_t 5) (= mode3_{1} true))
(and (>= x3_{0}_t 5) (= mode3_{1} false))))"""]

#############
# Init/Goal #
#############

# Initial condition: all clocks at 0, all modes on, each x_i within 0.1
# of the threshold 5.
init_cond = """
(assert (= tau_{0}_0 0))
(assert (= mode1_{0} true))
(assert (and (>= x1_{0}_0 (- 5 0.1)) (<= x1_{0}_0 (+ 5 0.1))))
(assert (= mode2_{0} true))
(assert (and (>= x2_{0}_0 (- 5 0.1)) (<= x2_{0}_0 (+ 5 0.1))))
(assert (= mode3_{0} true))
(assert (and (>= x3_{0}_0 (- 5 0.1)) (<= x3_{0}_0 (+ 5 0.1))))
"""

# Goal (unsafe) condition: some x_i leaves the [5 - 0.1, 5 + 0.1] band.
goal_cond = """
(assert (or (< x1_{0}_t (- 5 0.1)) (> x1_{0}_t (+ 5 0.1))))
(assert (or (< x2_{0}_t (- 5 0.1)) (> x2_{0}_t (+ 5 0.1))))
(assert (or (< x3_{0}_t (- 5 0.1)) (> x3_{0}_t (+ 5 0.1))))
"""

import sys
try:
    bound = int(sys.argv[1])
except (IndexError, ValueError):
    # IndexError: no argument supplied; ValueError: not an integer.
    # The original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt, hiding unrelated failures.
    print("Usage:", sys.argv[0], "<Bound>")
else:
    generate(bound, 1, [0], 0, init_cond, goal_cond)
| gpl-3.0 |
qrkourier/ansible | test/runner/lib/cloud/vcenter.py | 32 | 4693 | """VMware vCenter plugin for integration tests."""
from __future__ import absolute_import, print_function
import os
from lib.cloud import (
CloudProvider,
CloudEnvironment,
)
from lib.util import (
find_executable,
display,
)
from lib.docker_util import (
docker_run,
docker_rm,
docker_inspect,
docker_pull,
get_docker_container_id,
)
try:
# noinspection PyPep8Naming
import ConfigParser as configparser
except ImportError:
# noinspection PyUnresolvedReferences
import configparser
class VcenterProvider(CloudProvider):
    """VMware vcenter/esx plugin. Sets up cloud resources for tests."""
    DOCKER_SIMULATOR_NAME = 'vcenter-simulator'

    def __init__(self, args):
        """
        :type args: TestConfig
        """
        super(VcenterProvider, self).__init__(args, config_extension='.ini')

        # Docker image that ships the vCenter API simulator.
        self.image = 'ansible/ansible:vcenter-simulator'
        # Set by _setup_dynamic() once a simulator container is started;
        # empty means there is nothing to clean up.
        self.container_name = ''

    def filter(self, targets, exclude):
        """Filter out the cloud tests when the necessary config and resources are not available.
        :type targets: tuple[TestTarget]
        :type exclude: list[str]
        """
        # Either a static config file or a usable docker binary is enough
        # to run; only defer to the base class (which excludes the
        # targets) when neither is available.
        if os.path.isfile(self.config_static_path):
            return

        docker = find_executable('docker')

        if docker:
            return

        super(VcenterProvider, self).filter(targets, exclude)

    def setup(self):
        """Setup the cloud resource before delegation and register a cleanup callback."""
        super(VcenterProvider, self).setup()

        if self._use_static_config():
            self._setup_static()
        else:
            self._setup_dynamic()

    def get_docker_run_options(self):
        """Get any additional options needed when delegating tests to a docker container.
        :rtype: list[str]
        """
        if self.managed:
            return ['--link', self.DOCKER_SIMULATOR_NAME]

        return []

    def cleanup(self):
        """Clean up the cloud resource and any temporary configuration files after tests complete."""
        if self.container_name:
            docker_rm(self.args, self.container_name)

        super(VcenterProvider, self).cleanup()

    def _setup_dynamic(self):
        """Create a vcenter simulator using docker."""
        container_id = get_docker_container_id()

        if container_id:
            display.info('Running in docker container: %s' % container_id, verbosity=1)

        self.container_name = self.DOCKER_SIMULATOR_NAME

        results = docker_inspect(self.args, self.container_name)

        # Discard a leftover container that exists but is not running.
        if results and not results[0].get('State', {}).get('Running'):
            docker_rm(self.args, self.container_name)
            results = []

        if results:
            display.info('Using the existing vCenter simulator docker container.', verbosity=1)
        else:
            display.info('Starting a new vCenter simulator docker container.', verbosity=1)

            if not self.args.docker and not container_id:
                # publish the simulator ports when not running inside docker
                publish_ports = [
                    '-p', '80:80',
                    '-p', '443:443',
                    '-p', '8080:8080',
                    '-p', '8989:8989',
                    '-p', '5000:5000',  # control port for flask app in simulator
                ]
            else:
                publish_ports = []

            docker_pull(self.args, self.image)

            docker_run(
                self.args,
                self.image,
                ['-d', '--name', self.container_name] + publish_ports,
            )

        # Choose the address the tests should use to reach the simulator,
        # depending on where the tests themselves will run.
        if self.args.docker:
            vcenter_host = self.DOCKER_SIMULATOR_NAME
        elif container_id:
            vcenter_host = self._get_simulator_address()
            display.info('Found vCenter simulator container address: %s' % vcenter_host, verbosity=1)
        else:
            vcenter_host = 'localhost'

        self._set_cloud_config('vcenter_host', vcenter_host)

    def _get_simulator_address(self):
        # Look up the simulator container's IP address via docker inspect.
        results = docker_inspect(self.args, self.container_name)
        ipaddress = results[0]['NetworkSettings']['IPAddress']
        return ipaddress

    def _setup_static(self):
        raise NotImplementedError()
class VcenterEnvironment(CloudEnvironment):
    """VMware vcenter/esx environment plugin. Updates integration test environment after delegation."""
    def configure_environment(self, env, cmd):
        """
        :type env: dict[str, str]
        :type cmd: list[str]
        """
        # Send the container IP down to the integration test(s)
        env['vcenter_host'] = self._get_cloud_config('vcenter_host')
| gpl-3.0 |
userbz/DeMix | deprecated/Version_0/post1_psmMzmlExtend.py | 1 | 1529 | # Bo.Zhang@ki.se
# extending PSM list from second-pass Morpheus search for rescoring
import sys
import pymzml
import numpy
import pandas
def nearest(target, arr):
    """Return the element of *arr* closest to *target*, or 0 on failure.

    Parameters
    ----------
    target : float
        Reference value (e.g. a theoretical peak m/z).
    arr : numpy.ndarray
        Candidate values; may be empty.

    Returns
    -------
    The closest element (first one in case of ties), or 0 when no
    candidate exists.
    """
    try:
        return arr[numpy.abs(arr - target).argmin()]
    except (ValueError, TypeError):
        # argmin() raises ValueError on an empty array; TypeError covers
        # non-numeric input. The original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit, which should propagate.
        return 0
def peak_pair(target, arr):
    """Map each value in *target* to its nearest neighbour in *arr*.

    Returns the matches as the repr() of a list, which is how they are
    stored in the output table column.
    """
    matched = []
    for peak in target:
        matched.append(nearest(peak, arr))
    return repr(matched)
def pymzml_psm(fn, df):
    """Annotate PSM rows in *df* with the observed peaks nearest to their
    theoretical product ions, read from the mzML file *fn*.

    Rows are matched to spectra through the DataFrame index, which is
    assumed to hold 1-based spectrum indices -- TODO confirm against the
    upstream search output.
    """
    speciter = pymzml.run.Reader(fn)
    index_set = set(df.index)
    # Pre-create the output column so unmatched rows end up with ''.
    df['Nearest Matches'] = ['' for _ in df.index]
    try:
        for spec in speciter:
            # 0-based scan index read from the raw XML element, +1 to
            # line up with the (presumably 1-based) DataFrame index.
            idx = int(spec.xmlTree.next().get('index')) + 1
            # Progress indicator every 2000 spectra.
            if idx and idx % 2000 == 0:
                sys.stderr.write("%d %s\n" % (idx, fn))
            # Only MS2 spectra that actually appear in the PSM table.
            if spec['ms level'] != 2 or idx not in index_set:
                continue
            theoSpec = df.loc[idx]['Theoretical Products']
            specPeaks = numpy.array(map(lambda p: p[0], spec.peaks))
            match = peak_pair(theoSpec, specPeaks)
            df.loc[idx, 'Nearest Matches'] = match
    except KeyError:
        # NOTE(review): silently stops on a missing spectrum key;
        # presumably tolerates truncated/odd mzML files -- verify.
        pass
    return df
if __name__ == '__main__':
    # Usage: post1_psmMzmlExtend.py <psm_table> <mzml_file>
    df = pandas.read_table(sys.argv[1], index_col= 1)
    if 'Theoretical Products' in df.columns:
        # The column was serialized with repr(); eval() restores the
        # Python lists.
        # NOTE(review): eval on file contents is unsafe for untrusted
        # input -- acceptable only for trusted pipeline output.
        df['Theoretical Products'] = [eval(i) for i in df['Theoretical Products']]
    else:
        # Column missing: recompute the theoretical products first.
        import psmTheoretical
        df = psmTheoretical.add_theoretical(sys.argv[1])
    df.to_csv(sys.argv[1]+ '.ext', index=0, sep='\t')
    df = pymzml_psm(sys.argv[2], df)
    df.to_csv(sys.argv[1]+ '.ext.matched', index=0, sep='\t')
| mit |
jamessergeant/pylearn2 | pylearn2/models/mnd.py | 49 | 4148 | __authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from pylearn2.models.model import Model
from pylearn2.utils import sharedX
import numpy as np
import theano.tensor as T
class DiagonalMND(Model):
    """
    A model based on the multivariate normal distribution. This variant is
    constrained to have diagonal covariance.

    Parameters
    ----------
    nvis : int
        Dimensionality of the visible (observed) space.
    init_beta : float
        Initial value for each element of beta, the diagonal of the
        precision (inverse covariance) matrix.
    init_mu : float
        Initial value for each element of the mean vector mu.
    min_beta : float
        Lower bound that updates to beta are clipped to.
    max_beta : float
        Upper bound that updates to beta are clipped to.
    """

    # TODO: unify this with distribution.mnd
    def __init__(self, nvis,
            init_beta,
            init_mu,
            min_beta,
            max_beta):

        #copy all arguments to the object
        self.__dict__.update( locals() )
        del self.self

        super(DiagonalMND,self).__init__()

        #build the object
        self.redo_everything()

    def redo_everything(self):
        """
        Allocate the shared parameters (beta and mu, both of shape
        (nvis,)) at their initial values, then rebuild theano state.
        """
        self.beta = sharedX(np.ones((self.nvis,))*self.init_beta,'beta')
        self.mu = sharedX(np.ones((self.nvis,))*self.init_mu,'mu')
        self.redo_theano()

    def free_energy(self, X):
        """
        Return 0.5 * (X - mu)^T diag(beta) (X - mu) for each row of X,
        as a theano expression.
        """
        diff = X-self.mu
        sq = T.sqr(diff)
        return 0.5 * T.dot( sq, self.beta )

    def log_prob(self, X):
        """
        Return the log density of each row of X under the model.
        """
        return -self.free_energy(X) - self.log_partition_function()

    def log_partition_function(self):
        """
        Return log Z, the log of the Gaussian normalizing constant.
        """
        # Z^-1 = (2pi)^{-nvis/2} det( beta^-1 )^{-1/2}
        # Z = (2pi)^(nvis/2) sqrt( det( beta^-1) )
        # log Z = (nvis/2) log 2pi - (1/2) sum(log(beta))
        return float(self.nvis)/2. * np.log(2*np.pi) - 0.5 * T.sum(T.log(self.beta))

    def redo_theano(self):
        """
        Reset the per-parameter bookkeeping of already-censored updates
        (see _modify_updates).
        """
        init_names = dir(self)

        self.censored_updates = {}
        for param in self.get_params():
            self.censored_updates[param] = set([])

        final_names = dir(self)
        self.register_names_to_del( [name for name in final_names if name not in init_names])

    def _modify_updates(self, updates):
        """
        Clip any not-yet-seen update to beta into [min_beta, max_beta],
        and record every parameter update so it is not clipped twice.
        """
        if self.beta in updates and updates[self.beta] not in self.censored_updates[self.beta]:
            updates[self.beta] = T.clip(updates[self.beta], self.min_beta, self.max_beta )

        params = self.get_params()
        for param in updates:
            if param in params:
                self.censored_updates[param] = self.censored_updates[param].union(set([updates[param]]))

    def get_params(self):
        """
        Return the list of trainable parameters, [mu, beta].
        """
        return [self.mu, self.beta ]
def kl_divergence(q,p):
    """
    Return the KL divergence D_KL(q || p) between two DiagonalMND models
    of the same dimensionality, as a scalar theano expression.
    """
    #KL divergence of two DiagonalMNDs
    #http://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence#KL_divergence_for_Normal_Distributions
    #D_KL(q||p) = 0.5 ( beta_q^T beta_p^-1 + beta_p^T sq(mu_p - mu_q) - log(det Siga_q / det Sigma_p) - k)
    assert isinstance(q,DiagonalMND)
    assert isinstance(p,DiagonalMND)
    assert q.nvis == p.nvis
    k = q.nvis

    beta_q = q.beta
    beta_p = p.beta
    beta_q_inv = 1./beta_q

    # trace(Sigma_p^-1 Sigma_q) for diagonal covariances
    trace_term = T.dot(beta_q_inv,beta_p)
    assert trace_term.ndim == 0

    mu_p = p.mu
    mu_q = q.mu

    # (mu_p - mu_q)^T diag(beta_p) (mu_p - mu_q)
    quad_term = T.dot(beta_p, T.sqr(mu_p-mu_q))
    assert quad_term.ndim == 0

    # - log ( det Sigma_q / det Sigma_p)
    # = log det Sigma_p - log det Sigma_q
    # = log det Beta_p_inv - log det Beta_q_inv
    # = sum(log(beta_p_inv)) - sum(log(beta_q_inv))
    # = sum(log(beta_q)) - sum(log(beta_p))
    log_term = T.sum(T.log(beta_q)) - T.sum(T.log(beta_p))
    assert log_term.ndim == 0

    inside_parens = trace_term + quad_term + log_term - k
    assert inside_parens.ndim == 0

    rval = 0.5 * inside_parens

    return rval
| bsd-3-clause |
alexandrujuncu/sos | sos/plugins/openstack_cinder.py | 7 | 3713 | # Copyright (C) 2009 Red Hat, Inc., Joey Boggs <jboggs@redhat.com>
# Copyright (C) 2012 Rackspace US, Inc.,
# Justin Shepherd <jshepher@rackspace.com>
# Copyright (C) 2013 Red Hat, Inc., Flavio Percoco <fpercoco@redhat.com>
# Copyright (C) 2013 Red Hat, Inc., Jeremy Agee <jagee@redhat.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class OpenStackCinder(Plugin):
    """OpenStack cinder
    """
    plugin_name = "openstack_cinder"
    profiles = ('openstack', 'openstack_controller')

    option_list = [("db", "gathers openstack cinder db version", "slow",
                    False)]

    def setup(self):
        """Collect cinder configuration, logs and (optionally) db version."""
        if self.get_option("db"):
            self.add_cmd_output(
                "cinder-manage db version",
                suggest_filename="cinder_db_version")

        self.add_copy_spec(["/etc/cinder/"])

        self.limit = self.get_option("log_size")
        if self.get_option("all_logs"):
            self.add_copy_spec_limit("/var/log/cinder/",
                                     sizelimit=self.limit)
        else:
            self.add_copy_spec_limit("/var/log/cinder/*.log",
                                     sizelimit=self.limit)

    def postproc(self):
        """Scrub secret values from the collected cinder config files."""
        protect_keys = [
            "admin_password", "backup_tsm_password", "chap_password",
            "nas_password", "cisco_fc_fabric_password", "coraid_password",
            "eqlx_chap_password", "fc_fabric_password",
            "hitachi_auth_password", "hitachi_horcm_password",
            "hp3par_password", "hplefthand_password", "memcache_secret_key",
            "netapp_password", "netapp_sa_password", "nexenta_password",
            "password", "qpid_password", "rabbit_password", "san_password",
            "ssl_key_password", "vmware_host_password", "zadara_password",
            "zfssa_initiator_password", "connection", "zfssa_target_password",
            "os_privileged_user_password", "hmac_keys"
        ]
        # The inline MULTILINE flag must appear at the start of the
        # pattern: "(?m)" mid-pattern raises ValueError on Python 3.11+
        # (DeprecationWarning since 3.6). Group 1 still captures the
        # "key = " prefix ("^" and "(?m)" are zero-width), so the
        # replacement behavior is unchanged.
        regexp = r"(?m)^(\s*(%s)\s*=\s*)(.*)" % "|".join(protect_keys)
        self.do_path_regex_sub("/etc/cinder/*", regexp, r"\1*********")
class DebianCinder(OpenStackCinder, DebianPlugin, UbuntuPlugin):

    cinder = False
    # Packages that ship cinder bits on Debian/Ubuntu.
    packages = (
        'cinder-api',
        'cinder-backup',
        'cinder-common',
        'cinder-scheduler',
        'cinder-volume',
        'python-cinder',
        'python-cinderclient'
    )

    def check_enabled(self):
        # cinder-common is the marker package for a cinder installation.
        self.cinder = self.is_installed("cinder-common")
        return self.cinder

    def setup(self):
        super(DebianCinder, self).setup()
class RedHatCinder(OpenStackCinder, RedHatPlugin):

    cinder = False
    # Packages that ship cinder bits on RHEL/Fedora.
    packages = ('openstack-cinder',
                'python-cinder',
                'python-cinderclient')

    def check_enabled(self):
        # openstack-cinder is the marker package for a cinder installation.
        self.cinder = self.is_installed("openstack-cinder")
        return self.cinder

    def setup(self):
        super(RedHatCinder, self).setup()
        # Also collect the cinder sudoers drop-in specific to RH packaging.
        self.add_copy_spec(["/etc/sudoers.d/cinder"])


# vim: set et ts=4 sw=4 :
| gpl-2.0 |
thnee/ansible | test/units/modules/network/onyx/test_onyx_config.py | 52 | 4615 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.onyx import onyx_config
from units.modules.utils import set_module_args
from .onyx_module import TestOnyxModule, load_fixture
class TestOnyxConfigModule(TestOnyxModule):

    module = onyx_config

    def setUp(self):
        super(TestOnyxConfigModule, self).setUp()
        # Patch the module's device I/O helpers so no real Onyx device
        # (or connection) is needed.
        self.mock_get_config = patch('ansible.modules.network.onyx.onyx_config.get_config')
        self.get_config = self.mock_get_config.start()

        self.mock_load_config = patch('ansible.modules.network.onyx.onyx_config.load_config')
        self.load_config = self.mock_load_config.start()

        self.mock_run_commands = patch('ansible.modules.network.onyx.onyx_config.run_commands')
        self.run_commands = self.mock_run_commands.start()

    def tearDown(self):
        super(TestOnyxConfigModule, self).tearDown()
        self.mock_get_config.stop()
        self.mock_load_config.stop()
        self.mock_run_commands.stop()

    def load_fixtures(self, commands=None, transport='cli'):
        # Every test starts from the same canned running-config fixture.
        config_file = 'onyx_config_config.cfg'
        self.get_config.return_value = load_fixture(config_file)
        self.load_config.return_value = None

    def test_onyx_config_unchanged(self):
        # Pushing the current config verbatim must be a no-op.
        src = load_fixture('onyx_config_config.cfg')
        set_module_args(dict(src=src))
        self.execute_module()

    def test_onyx_config_src(self):
        src = load_fixture('onyx_config_src.cfg')
        set_module_args(dict(src=src))
        commands = [
            'interface mlag-port-channel 2']
        self.execute_module(changed=True, commands=commands, is_updates=True)

    def test_onyx_config_backup(self):
        set_module_args(dict(backup=True))
        result = self.execute_module()
        self.assertIn('__backup__', result)

    def test_onyx_config_save(self):
        # save=yes must trigger a single 'configuration write' via
        # load_config, not via run_commands.
        set_module_args(dict(lines=['hostname foo'], save='yes'))
        self.execute_module(changed=True)
        self.assertEqual(self.run_commands.call_count, 0)
        self.assertEqual(self.get_config.call_count, 1)
        self.assertEqual(self.load_config.call_count, 1)
        args = self.load_config.call_args[0][1]
        self.assertIn('configuration write', args)

    def test_onyx_config_lines_wo_parents(self):
        set_module_args(dict(lines=['hostname foo']))
        commands = ['hostname foo']
        self.execute_module(changed=True, commands=commands, is_updates=True)

    def test_onyx_config_before(self):
        set_module_args(dict(lines=['hostname foo'], before=['test1', 'test2']))
        commands = ['test1', 'test2', 'hostname foo']
        self.execute_module(changed=True, commands=commands, sort=False, is_updates=True)

    def test_onyx_config_after(self):
        set_module_args(dict(lines=['hostname foo'], after=['test1', 'test2']))
        commands = ['hostname foo', 'test1', 'test2']
        self.execute_module(changed=True, commands=commands, sort=False, is_updates=True)

    def test_onyx_config_before_after(self):
        set_module_args(dict(lines=['hostname foo'],
                             before=['test1', 'test2'],
                             after=['test3', 'test4']))
        commands = ['test1', 'test2', 'hostname foo', 'test3', 'test4']
        self.execute_module(changed=True, commands=commands, sort=False, is_updates=True)

    def test_onyx_config_config(self):
        config = 'hostname localhost'
        set_module_args(dict(lines=['hostname router'], config=config))
        commands = ['hostname router']
        self.execute_module(changed=True, commands=commands, is_updates=True)

    def test_onyx_config_match_none(self):
        lines = ['hostname router']
        set_module_args(dict(lines=lines, match='none'))
        self.execute_module(changed=True, commands=lines, is_updates=True)
| gpl-3.0 |
thaim/ansible | lib/ansible/modules/network/cli/cli_config.py | 12 | 15208 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: cli_config
version_added: "2.7"
author: "Trishna Guha (@trishnaguha)"
short_description: Push text based configuration to network devices over network_cli
description:
- This module provides platform agnostic way of pushing text based
configuration to network devices over network_cli connection plugin.
extends_documentation_fragment: network_agnostic
options:
config:
description:
- The config to be pushed to the network device. This argument
is mutually exclusive with C(rollback) and either one of the
option should be given as input. The config should have
indentation that the device uses.
type: 'str'
commit:
description:
- The C(commit) argument instructs the module to push the
configuration to the device. This is mapped to module check mode.
type: 'bool'
replace:
description:
- If the C(replace) argument is set to C(yes), it will replace
the entire running-config of the device with the C(config)
argument value. For devices that support replacing running
configuration from file on device like NXOS/JUNOS, the
C(replace) argument takes path to the file on the device
that will be used for replacing the entire running-config.
The value of C(config) option should be I(None) for such devices.
Nexus 9K devices only support replace. Use I(net_put) or
I(nxos_file_copy) in case of NXOS module to copy the flat file
to remote device and then use set the fullpath to this argument.
type: 'str'
backup:
description:
- This argument will cause the module to create a full backup of
the current running config from the remote device before any
changes are made. If the C(backup_options) value is not given,
the backup file is written to the C(backup) folder in the playbook
root directory or role root directory, if playbook is part of an
ansible role. If the directory does not exist, it is created.
type: bool
default: 'no'
version_added: "2.8"
rollback:
description:
- The C(rollback) argument instructs the module to rollback the
current configuration to the identifier specified in the
argument. If the specified rollback identifier does not
exist on the remote device, the module will fail. To rollback
to the most recent commit, set the C(rollback) argument to 0.
This option is mutually exclusive with C(config).
commit_comment:
description:
- The C(commit_comment) argument specifies a text string to be used
when committing the configuration. If the C(commit) argument
is set to False, this argument is silently ignored. This argument
is only valid for the platforms that support commit operation
with comment.
type: 'str'
defaults:
description:
- The I(defaults) argument will influence how the running-config
is collected from the device. When the value is set to true,
the command used to collect the running-config is append with
the all keyword. When the value is set to false, the command
is issued without the all keyword.
default: 'no'
type: 'bool'
multiline_delimiter:
description:
- This argument is used when pushing a multiline configuration
element to the device. It specifies the character to use as
the delimiting character. This only applies to the configuration
action.
type: 'str'
diff_replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the C(diff_replace) argument is set to I(line)
then the modified lines are pushed to the device in configuration
mode. If the argument is set to I(block) then the entire command
block is pushed to the device in configuration mode if any
line is not correct. Note that this parameter will be ignored if
the platform has onbox diff support.
choices: ['line', 'block', 'config']
diff_match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If C(diff_match)
is set to I(line), commands are matched line by line. If C(diff_match)
is set to I(strict), command lines are matched with respect to position.
If C(diff_match) is set to I(exact), command lines must be an equal match.
Finally, if C(diff_match) is set to I(none), the module will not attempt
to compare the source configuration with the running configuration on the
remote device. Note that this parameter will be ignored if the platform
has onbox diff support.
choices: ['line', 'strict', 'exact', 'none']
diff_ignore_lines:
description:
- Use this argument to specify one or more lines that should be
ignored during the diff. This is used for lines in the configuration
that are automatically updated by the system. This argument takes
a list of regular expressions or exact line matches.
Note that this parameter will be ignored if the platform has onbox
diff support.
backup_options:
description:
- This is a dict object containing configurable options related to backup file path.
The value of this option is read only when C(backup) is set to I(yes), if C(backup) is set
to I(no) this option will be silently ignored.
suboptions:
filename:
description:
- The filename to be used to store the backup configuration. If the the filename
is not given it will be generated based on the hostname, current time and date
in format defined by <hostname>_config.<current-date>@<current-time>
dir_path:
description:
- This option provides the path ending with directory name in which the backup
configuration file will be stored. If the directory does not exist it will be first
created and the filename is either the value of C(filename) or default filename
as described in C(filename) options description. If the path value is not given
in that case a I(backup) directory will be created in the current working directory
and backup configuration will be copied in C(filename) within I(backup) directory.
type: path
type: dict
version_added: "2.8"
"""
EXAMPLES = """
- name: configure device with config
cli_config:
config: "{{ lookup('template', 'basic/config.j2') }}"
- name: multiline config
cli_config:
config: |
hostname foo
feature nxapi
- name: configure device with config with defaults enabled
cli_config:
config: "{{ lookup('template', 'basic/config.j2') }}"
defaults: yes
- name: Use diff_match
cli_config:
config: "{{ lookup('file', 'interface_config') }}"
diff_match: none
- name: nxos replace config
cli_config:
replace: 'bootflash:nxoscfg'
- name: junos replace config
cli_config:
replace: '/var/home/ansible/junos01.cfg'
- name: commit with comment
cli_config:
config: set system host-name foo
commit_comment: this is a test
- name: configurable backup path
cli_config:
config: "{{ lookup('template', 'basic/config.j2') }}"
backup: yes
backup_options:
filename: backup.cfg
dir_path: /home/user
"""
RETURN = """
commands:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['interface Loopback999', 'no shutdown']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: str
sample: /playbooks/ansible/backup/hostname_config.2016-07-16@22:28:34
"""
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils._text import to_text
def validate_args(module, device_operations):
    """Fail the module run when an option is used that the platform's
    cliconf plugin does not support.

    :param module: AnsibleModule-like object exposing ``params`` and
        ``fail_json(msg=...)``.
    :param device_operations: capability dict reported by the cliconf
        plugin (keys like ``supports_replace``).
    """
    feature_list = [
        'replace', 'rollback', 'commit_comment', 'defaults', 'multiline_delimiter',
        'diff_replace', 'diff_match', 'diff_ignore_lines',
    ]

    for feature in feature_list:
        if module.params[feature]:
            supports_feature = device_operations.get('supports_%s' % feature)
            if supports_feature is None:
                # fail_json() only accepts keyword arguments; the original
                # positional call raised a TypeError instead of reporting
                # the actual problem.
                module.fail_json(
                    msg="This platform does not specify whether %s is supported or not. "
                        "Please report an issue against this platform's cliconf plugin." % feature
                )
            elif not supports_feature:
                module.fail_json(msg='Option %s is not supported on this platform' % feature)
def run(module, device_operations, connection, candidate, running, rollback_id):
    """Apply the requested configuration change and build the result dict.

    :param module: AnsibleModule instance (reads params, check_mode, _diff).
    :param device_operations: capability flags from the cliconf plugin.
    :param connection: persistent connection proxy to the device.
    :param candidate: candidate configuration text (or None).
    :param running: current running configuration text.
    :param rollback_id: checkpoint to roll back to (or None).
    :returns: dict with 'changed' and, when diff mode is on, 'diff'.
    """
    result = {}
    resp = {}
    config_diff = []
    banner_diff = {}

    replace = module.params['replace']
    commit_comment = module.params['commit_comment']
    multiline_delimiter = module.params['multiline_delimiter']
    diff_replace = module.params['diff_replace']
    diff_match = module.params['diff_match']
    diff_ignore_lines = module.params['diff_ignore_lines']

    # Check mode: compute the diff but do not push anything.
    commit = not module.check_mode

    # 'replace' is a string option: it may be a boolean-ish literal, or a
    # path to a config file already present on the device.
    if replace in ('yes', 'true', 'True'):
        replace = True
    elif replace in ('no', 'false', 'False'):
        replace = False

    if replace is not None and replace not in [True, False] and candidate is not None:
        module.fail_json(msg="Replace value '%s' is a configuration file path already"
                             " present on the device. Hence 'replace' and 'config' options"
                             " are mutually exclusive" % replace)

    if rollback_id is not None:
        # Rollback takes precedence; the device reports a diff if anything changed.
        resp = connection.rollback(rollback_id, commit)
        if 'diff' in resp:
            result['changed'] = True

    elif device_operations.get('supports_onbox_diff'):
        # The device computes the diff itself; client-side diff options
        # are meaningless here, so warn that they are ignored.
        if diff_replace:
            module.warn('diff_replace is ignored as the device supports onbox diff')
        if diff_match:
            # Fixed typo in the warning text ('diff_mattch').
            module.warn('diff_match is ignored as the device supports onbox diff')
        if diff_ignore_lines:
            module.warn('diff_ignore_lines is ignored as the device supports onbox diff')

        if candidate and not isinstance(candidate, list):
            candidate = candidate.strip('\n').splitlines()

        kwargs = {'candidate': candidate, 'commit': commit, 'replace': replace,
                  'comment': commit_comment}
        resp = connection.edit_config(**kwargs)

        if 'diff' in resp:
            result['changed'] = True

    elif device_operations.get('supports_generate_diff'):
        # Diff is generated client-side from candidate vs. running config.
        kwargs = {'candidate': candidate, 'running': running}
        if diff_match:
            kwargs.update({'diff_match': diff_match})
        if diff_replace:
            kwargs.update({'diff_replace': diff_replace})
        if diff_ignore_lines:
            kwargs.update({'diff_ignore_lines': diff_ignore_lines})

        diff_response = connection.get_diff(**kwargs)

        config_diff = diff_response.get('config_diff')
        banner_diff = diff_response.get('banner_diff')

        if config_diff:
            if isinstance(config_diff, list):
                candidate = config_diff
            else:
                candidate = config_diff.splitlines()

            kwargs = {'candidate': candidate, 'commit': commit, 'replace': replace,
                      'comment': commit_comment}
            if commit:
                connection.edit_config(**kwargs)
            result['changed'] = True

        if banner_diff:
            candidate = json.dumps(banner_diff)

            kwargs = {'candidate': candidate, 'commit': commit}
            if multiline_delimiter:
                kwargs.update({'multiline_delimiter': multiline_delimiter})
            if commit:
                connection.edit_banner(**kwargs)
            result['changed'] = True

    # In diff mode, surface whatever diff we have (device-reported or
    # client-generated) to the user.
    if module._diff:
        if 'diff' in resp:
            result['diff'] = {'prepared': resp['diff']}
        else:
            diff = ''
            if config_diff:
                if isinstance(config_diff, list):
                    diff += '\n'.join(config_diff)
                else:
                    diff += config_diff

            if banner_diff:
                diff += json.dumps(banner_diff)

            result['diff'] = {'prepared': diff}

    return result
def main():
    """main entry point for execution

    Builds the argument spec, gathers device capabilities, and delegates
    the actual configuration work to run().
    """
    backup_spec = dict(
        filename=dict(),
        dir_path=dict(type='path')
    )
    argument_spec = dict(
        backup=dict(default=False, type='bool'),
        backup_options=dict(type='dict', options=backup_spec),
        config=dict(type='str'),
        commit=dict(type='bool'),
        replace=dict(type='str'),
        rollback=dict(type='int'),
        commit_comment=dict(type='str'),
        defaults=dict(default=False, type='bool'),
        multiline_delimiter=dict(type='str'),
        diff_replace=dict(choices=['line', 'block', 'config']),
        diff_match=dict(choices=['line', 'strict', 'exact', 'none']),
        diff_ignore_lines=dict(type='list')
    )

    mutually_exclusive = [('config', 'rollback')]
    required_one_of = [['backup', 'config', 'rollback']]

    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=mutually_exclusive,
                           required_one_of=required_one_of,
                           supports_check_mode=True)

    result = {'changed': False}

    connection = Connection(module._socket_path)
    capabilities = module.from_json(connection.get_capabilities())

    if capabilities:
        device_operations = capabilities.get('device_operations', dict())
        # Reject options the platform's cliconf plugin does not support.
        validate_args(module, device_operations)
    else:
        device_operations = dict()

    # Choose the flags used to fetch the running config (include
    # defaults only when requested and supported).
    if module.params['defaults']:
        if 'get_default_flag' in capabilities.get('rpc'):
            flags = connection.get_default_flag()
        else:
            flags = 'all'
    else:
        flags = []

    candidate = module.params['config']
    candidate = to_text(candidate, errors='surrogate_then_replace') if candidate else None
    running = connection.get_config(flags=flags)
    rollback_id = module.params['rollback']

    if module.params['backup']:
        result['__backup__'] = running

    if candidate or rollback_id or module.params['replace']:
        try:
            result.update(run(module, device_operations, connection, candidate, running, rollback_id))
        except Exception as exc:
            module.fail_json(msg=to_text(exc))

    module.exit_json(**result)


if __name__ == '__main__':
    main()
| mit |
Hao-Liu/avocado | selftests/unit/test_test.py | 1 | 3380 | import os
import shutil
import sys
import tempfile
if sys.version_info[:2] == (2, 6):
import unittest2 as unittest
else:
import unittest
from avocado.core import test
from avocado.utils import script
PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""
FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""
class TestClassTest(unittest.TestCase):

    def setUp(self):
        # A trivial passing avocado test; it is run twice so that the
        # tag-increment behavior for repeated names can be checked.
        class AvocadoPass(test.Test):

            def test(self):
                variable = True
                self.assertTrue(variable)
                self.whiteboard = 'foo'

        self.base_logdir = tempfile.mkdtemp(prefix='avocado_test_unittest')
        self.tst_instance_pass = AvocadoPass(base_logdir=self.base_logdir)
        self.tst_instance_pass.run_avocado()
        self.tst_instance_pass_new = AvocadoPass(base_logdir=self.base_logdir)
        self.tst_instance_pass_new.run_avocado()

    def testClassAttributesName(self):
        self.assertEqual(self.tst_instance_pass.name, 'AvocadoPass')

    def testClassAttributesStatus(self):
        self.assertEqual(self.tst_instance_pass.status, 'PASS')

    def testClassAttributesTimeElapsed(self):
        self.assertIsInstance(self.tst_instance_pass.time_elapsed, float)

    def testClassAttributesTag(self):
        self.assertEqual(self.tst_instance_pass.tag, "0")

    def testClassAttributesTaggedName(self):
        self.assertEqual(self.tst_instance_pass.tagged_name, "AvocadoPass")

    def testWhiteboardSave(self):
        # The whiteboard set inside the test must be persisted to a
        # 'whiteboard' file in the test's log directory.
        whiteboard_file = os.path.join(
            self.tst_instance_pass.logdir, 'whiteboard')
        self.assertTrue(os.path.isfile(whiteboard_file))
        with open(whiteboard_file, 'r') as whiteboard_file_obj:
            whiteboard_contents = whiteboard_file_obj.read().strip()
            self.assertTrue(whiteboard_contents, 'foo')

    def testTaggedNameNewTests(self):
        """
        New test instances should have crescent tag instances.
        """
        self.assertEqual(
            self.tst_instance_pass_new.tagged_name, "AvocadoPass.1")
        self.assertEqual(self.tst_instance_pass_new.tag, "1")

    def tearDown(self):
        shutil.rmtree(self.base_logdir)
class SimpleTestClassTest(unittest.TestCase):

    def setUp(self):
        # One passing and one failing shell script, each wrapped in a
        # SimpleTest and executed up front; tests only inspect status.
        self.tmpdir = tempfile.mkdtemp()
        self.pass_script = script.TemporaryScript(
            'avocado_pass.sh',
            PASS_SCRIPT_CONTENTS,
            'avocado_simpletest_unittest')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'avocado_fail.sh',
            FAIL_SCRIPT_CONTENTS,
            'avocado_simpletest_unittest')
        self.fail_script.save()
        self.tst_instance_pass = test.SimpleTest(
            name=self.pass_script.path,
            base_logdir=self.tmpdir)
        self.tst_instance_pass.run_avocado()
        self.tst_instance_fail = test.SimpleTest(
            name=self.fail_script.path,
            base_logdir=self.tmpdir)
        self.tst_instance_fail.run_avocado()

    def testSimpleTestPassStatus(self):
        self.assertEqual(self.tst_instance_pass.status, 'PASS')

    def testSimpleTestFailStatus(self):
        self.assertEqual(self.tst_instance_fail.status, 'FAIL')

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
kenshay/ImageScript | Script_Runner/PYTHON/Lib/asyncio/__init__.py | 11 | 1169 | """The asyncio package, tracking PEP 3156."""
# flake8: noqa
import sys
# This relies on each of the submodules having an __all__ variable.
from .base_events import *
from .coroutines import *
from .events import *
from .futures import *
from .locks import *
from .protocols import *
from .runners import *
from .queues import *
from .streams import *
from .subprocess import *
from .tasks import *
from .transports import *
# Exposed for _asynciomodule.c to implement now deprecated
# Task.all_tasks() method. This function will be removed in 3.9.
from .tasks import _all_tasks_compat # NoQA
# The package-level public API is the concatenation of each submodule's
# __all__ list (every submodule imported above defines one).
__all__ = (base_events.__all__ +
           coroutines.__all__ +
           events.__all__ +
           futures.__all__ +
           locks.__all__ +
           protocols.__all__ +
           runners.__all__ +
           queues.__all__ +
           streams.__all__ +
           subprocess.__all__ +
           tasks.__all__ +
           transports.__all__)
# Platform-specific event-loop support: exactly one branch is importable
# on any given platform, and it extends the public API accordingly.
if sys.platform == 'win32':  # pragma: no cover
    from .windows_events import *
    __all__ += windows_events.__all__
else:
    from .unix_events import *  # pragma: no cover
    __all__ += unix_events.__all__
| gpl-3.0 |
zaxliu/scipy | scipy/optimize/_root.py | 109 | 26007 | """
Unified interfaces to root finding algorithms.
Functions
---------
- root : find a root of a vector function.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['root']
import numpy as np
from scipy._lib.six import callable
from warnings import warn
from .optimize import MemoizeJac, OptimizeResult, _check_unknown_options
from .minpack import _root_hybr, leastsq
from ._spectral import _root_df_sane
from . import nonlin
def root(fun, x0, args=(), method='hybr', jac=None, tol=None, callback=None,
         options=None):
    """
    Find a root of a vector function.

    Parameters
    ----------
    fun : callable
        A vector function to find a root of.
    x0 : ndarray
        Initial guess.
    args : tuple, optional
        Extra arguments passed to the objective function and its Jacobian.
    method : str, optional
        Type of solver. Should be one of
            - 'hybr'             :ref:`(see here) <optimize.root-hybr>`
            - 'lm'               :ref:`(see here) <optimize.root-lm>`
            - 'broyden1'         :ref:`(see here) <optimize.root-broyden1>`
            - 'broyden2'         :ref:`(see here) <optimize.root-broyden2>`
            - 'anderson'         :ref:`(see here) <optimize.root-anderson>`
            - 'linearmixing'     :ref:`(see here) <optimize.root-linearmixing>`
            - 'diagbroyden'      :ref:`(see here) <optimize.root-diagbroyden>`
            - 'excitingmixing'   :ref:`(see here) <optimize.root-excitingmixing>`
            - 'krylov'           :ref:`(see here) <optimize.root-krylov>`
            - 'df-sane'          :ref:`(see here) <optimize.root-dfsane>`
    jac : bool or callable, optional
        If `jac` is a Boolean and is True, `fun` is assumed to return the
        value of Jacobian along with the objective function. If False, the
        Jacobian will be estimated numerically.
        `jac` can also be a callable returning the Jacobian of `fun`. In
        this case, it must accept the same arguments as `fun`.
    tol : float, optional
        Tolerance for termination. For detailed control, use solver-specific
        options.
    callback : function, optional
        Optional callback function. It is called on every iteration as
        ``callback(x, f)`` where `x` is the current solution and `f`
        the corresponding residual. For all methods but 'hybr' and 'lm'.
    options : dict, optional
        A dictionary of solver options. E.g. `xtol` or `maxiter`, see
        :obj:`show_options()` for details.

    Returns
    -------
    sol : OptimizeResult
        The solution represented as a ``OptimizeResult`` object.
        Important attributes are: ``x`` the solution array, ``success`` a
        Boolean flag indicating if the algorithm exited successfully and
        ``message`` which describes the cause of the termination. See
        `OptimizeResult` for a description of other attributes.

    See also
    --------
    show_options : Additional options accepted by the solvers

    Notes
    -----
    This section describes the available solvers that can be selected by the
    'method' parameter. The default method is *hybr*.

    Method *hybr* uses a modification of the Powell hybrid method as
    implemented in MINPACK [1]_.

    Method *lm* solves the system of nonlinear equations in a least squares
    sense using a modification of the Levenberg-Marquardt algorithm as
    implemented in MINPACK [1]_.

    Method *df-sane* is a derivative-free spectral method. [3]_

    Methods *broyden1*, *broyden2*, *anderson*, *linearmixing*,
    *diagbroyden*, *excitingmixing*, *krylov* are inexact Newton methods,
    with backtracking or full line searches [2]_. Each method corresponds
    to a particular Jacobian approximations. See `nonlin` for details.

    - Method *broyden1* uses Broyden's first Jacobian approximation, it is
      known as Broyden's good method.
    - Method *broyden2* uses Broyden's second Jacobian approximation, it
      is known as Broyden's bad method.
    - Method *anderson* uses (extended) Anderson mixing.
    - Method *Krylov* uses Krylov approximation for inverse Jacobian. It
      is suitable for large-scale problem.
    - Method *diagbroyden* uses diagonal Broyden Jacobian approximation.
    - Method *linearmixing* uses a scalar Jacobian approximation.
    - Method *excitingmixing* uses a tuned diagonal Jacobian
      approximation.

    .. warning::
        The algorithms implemented for methods *diagbroyden*,
        *linearmixing* and *excitingmixing* may be useful for specific
        problems, but whether they will work may depend strongly on the
        problem.

    .. versionadded:: 0.11.0

    References
    ----------
    .. [1] More, Jorge J., Burton S. Garbow, and Kenneth E. Hillstrom.
       1980. User Guide for MINPACK-1.
    .. [2] C. T. Kelley. 1995. Iterative Methods for Linear and Nonlinear
       Equations. Society for Industrial and Applied Mathematics.
       <http://www.siam.org/books/kelley/>
    .. [3] W. La Cruz, J.M. Martinez, M. Raydan. Math. Comp. 75, 1429 (2006).

    Examples
    --------
    The following functions define a system of nonlinear equations and its
    jacobian.

    >>> def fun(x):
    ...     return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
    ...             0.5 * (x[1] - x[0])**3 + x[1]]

    >>> def jac(x):
    ...     return np.array([[1 + 1.5 * (x[0] - x[1])**2,
    ...                       -1.5 * (x[0] - x[1])**2],
    ...                      [-1.5 * (x[1] - x[0])**2,
    ...                       1 + 1.5 * (x[1] - x[0])**2]])

    A solution can be obtained as follows.

    >>> from scipy import optimize
    >>> sol = optimize.root(fun, [0, 0], jac=jac, method='hybr')
    >>> sol.x
    array([ 0.8411639,  0.1588361])
    """
    # Normalize a scalar extra-argument into the tuple form the solvers expect.
    if not isinstance(args, tuple):
        args = (args,)

    # Method names are matched case-insensitively; keep the original
    # `method` string for user-facing warning/error messages.
    meth = method.lower()
    if options is None:
        options = {}

    # The MINPACK-backed solvers have no callback hook; warn rather than
    # silently dropping the user's callback.
    if callback is not None and meth in ('hybr', 'lm'):
        warn('Method %s does not accept callback.' % method,
             RuntimeWarning)

    # fun also returns the jacobian
    if not callable(jac) and meth in ('hybr', 'lm'):
        if bool(jac):
            # jac=True: fun returns (f, jac); memoize so each is exposed
            # separately to the solver without recomputation.
            fun = MemoizeJac(fun)
            jac = fun.derivative
        else:
            jac = None

    # set default tolerances
    # `tol` is translated into the solver-specific option name; a copy of
    # `options` is taken so the caller's dict is never mutated.
    if tol is not None:
        options = dict(options)
        if meth in ('hybr', 'lm'):
            options.setdefault('xtol', tol)
        elif meth in ('df-sane',):
            options.setdefault('ftol', tol)
        elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing',
                      'diagbroyden', 'excitingmixing', 'krylov'):
            options.setdefault('xtol', tol)
            # Disable the other stopping criteria so `tol` alone governs
            # termination for the nonlin-based solvers.
            options.setdefault('xatol', np.inf)
            options.setdefault('ftol', np.inf)
            options.setdefault('fatol', np.inf)

    # Dispatch to the backend implementing the requested method.
    if meth == 'hybr':
        sol = _root_hybr(fun, x0, args=args, jac=jac, **options)
    elif meth == 'lm':
        sol = _root_leastsq(fun, x0, args=args, jac=jac, **options)
    elif meth == 'df-sane':
        _warn_jac_unused(jac, method)
        sol = _root_df_sane(fun, x0, args=args, callback=callback,
                            **options)
    elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing',
                  'diagbroyden', 'excitingmixing', 'krylov'):
        _warn_jac_unused(jac, method)
        sol = _root_nonlin_solve(fun, x0, args=args, jac=jac,
                                 _method=meth, _callback=callback,
                                 **options)
    else:
        raise ValueError('Unknown solver %s' % method)

    return sol
def _warn_jac_unused(jac, method):
if jac is not None:
warn('Method %s does not use the jacobian (jac).' % (method,),
RuntimeWarning)
def _root_leastsq(func, x0, args=(), jac=None,
                  col_deriv=0, xtol=1.49012e-08, ftol=1.49012e-08,
                  gtol=0.0, maxiter=0, eps=0.0, factor=100, diag=None,
                  **unknown_options):
    """
    Solve for least squares with Levenberg-Marquardt

    Options
    -------
    col_deriv : bool
        non-zero to specify that the Jacobian function computes derivatives
        down the columns (faster, because there is no transpose operation).
    ftol : float
        Relative error desired in the sum of squares.
    xtol : float
        Relative error desired in the approximate solution.
    gtol : float
        Orthogonality desired between the function vector and the columns
        of the Jacobian.
    maxiter : int
        The maximum number of calls to the function. If zero, then
        100*(N+1) is the maximum where N is the number of elements in x0.
    epsfcn : float
        A suitable step length for the forward-difference approximation of
        the Jacobian (for Dfun=None). If epsfcn is less than the machine
        precision, it is assumed that the relative errors in the functions
        are of the order of the machine precision.
    factor : float
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
    diag : sequence
        N positive entries that serve as a scale factors for the variables.
    """
    # Reject any option names the 'lm' method does not understand.
    _check_unknown_options(unknown_options)
    # Map root()'s public option names onto leastsq's MINPACK-style
    # keywords: jac -> Dfun, maxiter -> maxfev, eps -> epsfcn.
    x, cov_x, info, msg, ier = leastsq(func, x0, args=args, Dfun=jac,
                                       full_output=True,
                                       col_deriv=col_deriv, xtol=xtol,
                                       ftol=ftol, gtol=gtol,
                                       maxfev=maxiter, epsfcn=eps,
                                       factor=factor, diag=diag)
    # leastsq's ier codes 1-4 all indicate successful termination.
    # The residual vector 'fvec' is popped from info so it appears in the
    # result as the conventional 'fun' attribute rather than twice.
    sol = OptimizeResult(x=x, message=msg, status=ier,
                         success=ier in (1, 2, 3, 4), cov_x=cov_x,
                         fun=info.pop('fvec'))
    sol.update(info)
    return sol
def _root_nonlin_solve(func, x0, args=(), jac=None,
                       _callback=None, _method=None,
                       nit=None, disp=False, maxiter=None,
                       ftol=None, fatol=None, xtol=None, xatol=None,
                       tol_norm=None, line_search='armijo', jac_options=None,
                       **unknown_options):
    """
    Find a root using one of the quasi-Newton solvers from
    `scipy.optimize.nonlin`, selected by the private `_method` name.
    Returns an OptimizeResult built from nonlin_solve's output.
    """
    _check_unknown_options(unknown_options)

    # Translate root()'s public option names into nonlin_solve parameter
    # names.  Note the crossed mapping: the public *ftol*/*xtol* are the
    # RELATIVE tolerances (f_rtol/x_rtol), while *fatol*/*xatol* are the
    # absolute ones (f_tol/x_tol).
    f_tol = fatol
    f_rtol = ftol
    x_tol = xatol
    x_rtol = xtol
    verbose = disp
    if jac_options is None:
        jac_options = dict()

    # Select the Jacobian-approximation class for the requested method.
    jacobian = {'broyden1': nonlin.BroydenFirst,
                'broyden2': nonlin.BroydenSecond,
                'anderson': nonlin.Anderson,
                'linearmixing': nonlin.LinearMixing,
                'diagbroyden': nonlin.DiagBroyden,
                'excitingmixing': nonlin.ExcitingMixing,
                'krylov': nonlin.KrylovJacobian
                }[_method]

    # Bind extra arguments into a single-argument residual function.
    if args:
        if jac:
            # func returns (f, jac); only the function value is used here
            # because these solvers build their own Jacobian approximation.
            def f(x):
                return func(x, *args)[0]
        else:
            def f(x):
                return func(x, *args)
    else:
        f = func

    x, info = nonlin.nonlin_solve(f, x0, jacobian=jacobian(**jac_options),
                                  iter=nit, verbose=verbose,
                                  maxiter=maxiter, f_tol=f_tol,
                                  f_rtol=f_rtol, x_tol=x_tol,
                                  x_rtol=x_rtol, tol_norm=tol_norm,
                                  line_search=line_search,
                                  callback=_callback, full_output=True,
                                  raise_exception=False)
    sol = OptimizeResult(x=x)
    sol.update(info)
    return sol
def _root_broyden1_doc():
"""
Options
-------
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in
the direction given by the Jacobian approximation. Defaults to
'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
reduction_method : str or tuple, optional
Method used in ensuring that the rank of the Broyden
matrix stays low. Can either be a string giving the
name of the method, or a tuple of the form ``(method,
param1, param2, ...)`` that gives the name of the
method and values for additional parameters.
Methods available:
- ``restart``: drop all matrix columns. Has no
extra parameters.
- ``simple``: drop oldest matrix column. Has no
extra parameters.
- ``svd``: keep only the most significant SVD
components.
Extra parameters:
- ``to_retain``: number of SVD components to
retain when rank reduction is done.
Default is ``max_rank - 2``.
max_rank : int, optional
Maximum rank for the Broyden matrix.
Default is infinity (ie., no rank reduction).
"""
pass
def _root_broyden2_doc():
"""
Options
-------
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in
the direction given by the Jacobian approximation. Defaults to
'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
reduction_method : str or tuple, optional
Method used in ensuring that the rank of the Broyden
matrix stays low. Can either be a string giving the
name of the method, or a tuple of the form ``(method,
param1, param2, ...)`` that gives the name of the
method and values for additional parameters.
Methods available:
- ``restart``: drop all matrix columns. Has no
extra parameters.
- ``simple``: drop oldest matrix column. Has no
extra parameters.
- ``svd``: keep only the most significant SVD
components.
Extra parameters:
- ``to_retain``: number of SVD components to
retain when rank reduction is done.
Default is ``max_rank - 2``.
max_rank : int, optional
Maximum rank for the Broyden matrix.
Default is infinity (ie., no rank reduction).
"""
pass
def _root_anderson_doc():
"""
Options
-------
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in
the direction given by the Jacobian approximation. Defaults to
'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
M : float, optional
Number of previous vectors to retain. Defaults to 5.
w0 : float, optional
Regularization parameter for numerical stability.
Compared to unity, good values of the order of 0.01.
"""
pass
def _root_linearmixing_doc():
"""
Options
-------
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, ``NoConvergence`` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in
the direction given by the Jacobian approximation. Defaults to
'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
initial guess for the jacobian is (-1/alpha).
"""
pass
def _root_diagbroyden_doc():
"""
Options
-------
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in
the direction given by the Jacobian approximation. Defaults to
'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
initial guess for the jacobian is (-1/alpha).
"""
pass
def _root_excitingmixing_doc():
"""
Options
-------
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in
the direction given by the Jacobian approximation. Defaults to
'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
Initial Jacobian approximation is (-1/alpha).
alphamax : float, optional
The entries of the diagonal Jacobian are kept in the range
``[alpha, alphamax]``.
"""
pass
def _root_krylov_doc():
"""
Options
-------
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in
the direction given by the Jacobian approximation. Defaults to
'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
rdiff : float, optional
Relative step size to use in numerical differentiation.
method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function
Krylov method to use to approximate the Jacobian.
Can be a string, or a function implementing the same
interface as the iterative solvers in
`scipy.sparse.linalg`.
The default is `scipy.sparse.linalg.lgmres`.
inner_M : LinearOperator or InverseJacobian
Preconditioner for the inner Krylov iteration.
Note that you can use also inverse Jacobians as (adaptive)
preconditioners. For example,
>>> jac = BroydenFirst()
>>> kjac = KrylovJacobian(inner_M=jac.inverse).
If the preconditioner has a method named 'update', it will
be called as ``update(x, f)`` after each nonlinear step,
with ``x`` giving the current point, and ``f`` the current
function value.
inner_tol, inner_maxiter, ...
Parameters to pass on to the "inner" Krylov solver.
See `scipy.sparse.linalg.gmres` for details.
outer_k : int, optional
Size of the subspace kept across LGMRES nonlinear
iterations.
See `scipy.sparse.linalg.lgmres` for details.
"""
pass
| bsd-3-clause |
iivic/BoiseStateX | lms/djangoapps/instructor/hint_manager.py | 110 | 11466 | """
Views for hint management.
Get to these views through courseurl/hint_manager.
For example: https://courses.edx.org/courses/MITx/2.01x/2013_Spring/hint_manager
These views will only be visible if FEATURES['ENABLE_HINTER_INSTRUCTOR_VIEW'] = True
"""
import json
import re
from django.http import HttpResponse, Http404
from django.views.decorators.csrf import ensure_csrf_cookie
from edxmako.shortcuts import render_to_response, render_to_string
from courseware.courses import get_course_with_access
from courseware.models import XModuleUserStateSummaryField
import courseware.module_render as module_render
import courseware.model_data as model_data
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.exceptions import ItemNotFoundError
@ensure_csrf_cookie
def hint_manager(request, course_id):
    """
    The URL landing function for all calls to the hint manager, both POST and GET.

    GET renders the full management page for the course's moderation queue.
    POST performs one operation (chosen by ``request.POST['op']``) and
    returns the re-rendered page body as JSON.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    try:
        # Raises Http404 when the requesting user lacks staff access.
        course = get_course_with_access(request.user, 'staff', course_key, depth=None)
    except Http404:
        out = 'Sorry, but students are not allowed to access the hint manager!'
        return HttpResponse(out)
    if request.method == 'GET':
        out = get_hints(request, course_key, 'mod_queue')
        out.update({'error': ''})
        return render_to_response('instructor/hint_manager.html', out)
    field = request.POST['field']
    if not (field == 'mod_queue' or field == 'hints'):
        # Invalid field. (Don't let users continue - they may overwrite other db's)
        out = 'Error in hint manager - an invalid field was accessed.'
        return HttpResponse(out)
    # Dispatch table: maps the requested operation name to its handler.
    switch_dict = {
        'delete hints': delete_hints,
        'switch fields': lambda *args: None,  # Takes any number of arguments, returns None.
        'change votes': change_votes,
        'add hint': add_hint,
        'approve': approve,
    }
    # Do the operation requested, and collect any error messages.
    error_text = switch_dict[request.POST['op']](request, course_key, field)
    if error_text is None:
        error_text = ''
    # Re-render the (possibly modified) hint listing and hand it back to
    # the client-side JavaScript as JSON.
    render_dict = get_hints(request, course_key, field, course=course)
    render_dict.update({'error': error_text})
    rendered_html = render_to_string('instructor/hint_manager_inner.html', render_dict)
    return HttpResponse(json.dumps({'success': True, 'contents': rendered_html}))
def get_hints(request, course_id, field, course=None):  # pylint: disable=unused-argument
    """
    Load all of the hints submitted to the course.

    Args:
    `request` -- Django request object.
    `course_id` -- The course id, like 'Me/19.002/test_course'
    `field` -- Either 'hints' or 'mod_queue'; specifies which set of hints to load.

    Keys in returned dict:
    - 'field': Same as input
    - 'other_field': 'mod_queue' if `field` == 'hints'; and vice-versa.
    - 'field_label', 'other_field_label': English name for the above.
    - 'all_hints': A list of [answer, pk dict] pairs, representing all hints.
      Sorted by answer.
    - 'id_to_name': A dictionary mapping problem id to problem name.
    """
    # Determine labels for the requested field and its counterpart, so the
    # template can offer a link to the other view.
    if field == 'mod_queue':
        other_field = 'hints'
        field_label = 'Hints Awaiting Moderation'
        other_field_label = 'Approved Hints'
    elif field == 'hints':
        other_field = 'mod_queue'
        field_label = 'Approved Hints'
        other_field_label = 'Hints Awaiting Moderation'
    # We want to use the course_id to find all matching usage_id's.
    # To do this, just take the school/number part - leave off the classname.
    # FIXME: we need to figure out how to do this with opaque keys
    all_hints = XModuleUserStateSummaryField.objects.filter(
        field_name=field,
        usage_id__regex=re.escape(u'{0.org}/{0.course}'.format(course_id)),
    )
    # big_out_dict[problem id] = [[answer, {pk: [hint, votes]}], sorted by answer]
    # big_out_dict maps a problem id to a list of [answer, hints] pairs, sorted in order of answer.
    big_out_dict = {}
    # id_to name maps a problem id to the name of the problem.
    # id_to_name[problem id] = Display name of problem
    id_to_name = {}

    for hints_by_problem in all_hints:
        # Rebind the stored (deprecated-format) usage id into this course.
        hints_by_problem.usage_id = hints_by_problem.usage_id.map_into_course(course_id)
        name = location_to_problem_name(course_id, hints_by_problem.usage_id)
        if name is None:
            # The hinter module no longer exists in the course; skip it.
            continue
        id_to_name[hints_by_problem.usage_id] = name

        def answer_sorter(thing):
            """
            `thing` is a tuple, where `thing[0]` contains an answer, and `thing[1]` contains
            a dict of hints. This function returns an index based on `thing[0]`, which
            is used as a key to sort the list of things.
            """
            try:
                return float(thing[0])
            except ValueError:
                # Put all non-numerical answers first.
                return float('-inf')

        # Answer list contains [answer, dict_of_hints] pairs.
        answer_list = sorted(json.loads(hints_by_problem.value).items(), key=answer_sorter)
        big_out_dict[hints_by_problem.usage_id] = answer_list

    render_dict = {'field': field,
                   'other_field': other_field,
                   'field_label': field_label,
                   'other_field_label': other_field_label,
                   'all_hints': big_out_dict,
                   'id_to_name': id_to_name}
    return render_dict
def location_to_problem_name(course_id, loc):
    """
    Return the display name of the problem wrapped by the
    crowdsource_hinter module at location `loc`, or None when that
    module no longer exists in the course.
    """
    try:
        # The hinter's single child is the wrapped problem.
        hinter = modulestore().get_item(loc)
        return hinter.get_children()[0].display_name
    except ItemNotFoundError:
        # The problem may have been removed from the course since the
        # hints were recorded; callers simply skip such entries.
        return None
def delete_hints(request, course_id, field, course=None):  # pylint: disable=unused-argument
    """
    Deletes the hints specified.

    `request.POST` contains some fields keyed by integers. Each such field contains a
    [problem_defn_id, answer, pk] tuple. These tuples specify the hints to be deleted.

    Example `request.POST`:
    {'op': 'delete_hints',
     'field': 'mod_queue',
     1: ['problem_whatever', '42.0', '3'],
     2: ['problem_whatever', '32.5', '12']}
    """
    for key in request.POST:
        # 'op' and 'field' are control parameters, not hint descriptors.
        if key == 'op' or key == 'field':
            continue
        problem_id, answer, pk = request.POST.getlist(key)
        problem_key = course_id.make_usage_key_from_deprecated_string(problem_id)
        # Can be optimized - sort the delete list by problem_id, and load each problem
        # from the database only once.
        this_problem = XModuleUserStateSummaryField.objects.get(field_name=field, usage_id=problem_key)
        problem_dict = json.loads(this_problem.value)
        # Drop the single hint entry keyed by pk under this answer, then
        # write the updated JSON blob back to the summary field.
        del problem_dict[answer][pk]
        this_problem.value = json.dumps(problem_dict)
        this_problem.save()
def change_votes(request, course_id, field, course=None):  # pylint: disable=unused-argument
    """
    Updates the number of votes.

    The numbered fields of `request.POST` contain [problem_id, answer, pk, new_votes] tuples.
    See `delete_hints`.

    Example `request.POST`:
    {'op': 'delete_hints',
     'field': 'mod_queue',
     1: ['problem_whatever', '42.0', '3', 42],
     2: ['problem_whatever', '32.5', '12', 9001]}
    """
    for key in request.POST:
        # 'op' and 'field' are control parameters, not vote updates.
        if key == 'op' or key == 'field':
            continue
        problem_id, answer, pk, new_votes = request.POST.getlist(key)
        problem_key = course_id.make_usage_key_from_deprecated_string(problem_id)
        this_problem = XModuleUserStateSummaryField.objects.get(field_name=field, usage_id=problem_key)
        problem_dict = json.loads(this_problem.value)
        # problem_dict[answer][pk] points to a [hint_text, #votes] pair.
        # POST values arrive as strings; the vote count is stored as an int.
        problem_dict[answer][pk][1] = int(new_votes)
        this_problem.value = json.dumps(problem_dict)
        this_problem.save()
def add_hint(request, course_id, field, course=None):
    """
    Add a new hint. `request.POST`:
    op
    field
    problem - The problem id
    answer - The answer to which a hint will be added
    hint - The text of the hint

    Returns an error string on invalid input, or None on success.
    """
    problem_id = request.POST['problem']
    problem_key = course_id.make_usage_key_from_deprecated_string(problem_id)
    answer = request.POST['answer']
    hint_text = request.POST['hint']
    # Validate the answer. This requires initializing the xmodules, which
    # is annoying.
    try:
        descriptor = modulestore().get_item(problem_key)
        descriptors = [descriptor]
    except ItemNotFoundError:
        descriptors = []
    field_data_cache = model_data.FieldDataCache(descriptors, course_id, request.user)
    hinter_module = module_render.get_module(
        request.user,
        request,
        problem_key,
        field_data_cache,
        course_id,
        course=course
    )
    if not hinter_module.validate_answer(answer):
        # Invalid answer. Don't add it to the database, or else the
        # hinter will crash when we encounter it.
        return 'Error - the answer you specified is not properly formatted: ' + str(answer)
    this_problem = XModuleUserStateSummaryField.objects.get(field_name=field, usage_id=problem_key)
    # 'hint_pk' holds the next free primary key for hints on this problem;
    # consume the current value and advance the counter.
    hint_pk_entry = XModuleUserStateSummaryField.objects.get(field_name='hint_pk', usage_id=problem_key)
    this_pk = int(hint_pk_entry.value)
    hint_pk_entry.value = this_pk + 1
    hint_pk_entry.save()
    problem_dict = json.loads(this_problem.value)
    if answer not in problem_dict:
        problem_dict[answer] = {}
    # A freshly added hint starts with a single vote.
    problem_dict[answer][this_pk] = [hint_text, 1]
    this_problem.value = json.dumps(problem_dict)
    this_problem.save()
def approve(request, course_id, field, course=None):  # pylint: disable=unused-argument
    """
    Approve a list of hints, moving them from the mod_queue to the real
    hint list. POST:
    op, field
    (some number) -> [problem, answer, pk]

    The numbered fields are analogous to those in `delete_hints` and `change_votes`.
    """
    for key in request.POST:
        # 'op' and 'field' are control parameters, not hint descriptors.
        if key == 'op' or key == 'field':
            continue
        problem_id, answer, pk = request.POST.getlist(key)
        problem_key = course_id.make_usage_key_from_deprecated_string(problem_id)
        # Can be optimized - sort the delete list by problem_id, and load each problem
        # from the database only once.
        # Step 1: remove the hint from the moderation-queue field...
        problem_in_mod = XModuleUserStateSummaryField.objects.get(field_name=field, usage_id=problem_key)
        problem_dict = json.loads(problem_in_mod.value)
        hint_to_move = problem_dict[answer][pk]
        del problem_dict[answer][pk]
        problem_in_mod.value = json.dumps(problem_dict)
        problem_in_mod.save()
        # Step 2: ...and file it, under the same pk, into the approved
        # 'hints' field for the same problem.
        problem_in_hints = XModuleUserStateSummaryField.objects.get(field_name='hints', usage_id=problem_key)
        problem_dict = json.loads(problem_in_hints.value)
        if answer not in problem_dict:
            problem_dict[answer] = {}
        problem_dict[answer][pk] = hint_to_move
        problem_in_hints.value = json.dumps(problem_dict)
        problem_in_hints.save()
| agpl-3.0 |
Immortalin/python-for-android | python-modules/twisted/twisted/internet/test/test_sigchld.py | 57 | 6405 | # Copyright (c) 2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet._sigchld}, an alternate, superior SIGCHLD
monitoring API.
"""
import os, signal, errno
from twisted.python.log import msg
from twisted.trial.unittest import TestCase
from twisted.internet.fdesc import setNonBlocking
from twisted.internet._signals import installHandler, isDefaultHandler
from twisted.internet._signals import _extInstallHandler, _extIsDefaultHandler
from twisted.internet._signals import _installHandlerUsingSetWakeup, \
_installHandlerUsingSignal, _isDefaultHandler
class SIGCHLDTestsMixin:
    """
    Mixin for L{TestCase} subclasses which defines several tests for
    I{installHandler} and I{isDefaultHandler}. Subclasses are expected to
    define C{self.installHandler} and C{self.isDefaultHandler} to invoke the
    implementation to be tested.
    """

    # SIGCHLD only exists on POSIX platforms; skip everywhere else.
    if getattr(signal, 'SIGCHLD', None) is None:
        skip = "Platform does not have SIGCHLD"

    def installHandler(self, fd):
        """
        Override in a subclass to install a SIGCHLD handler which writes a byte
        to the given file descriptor. Return the previously registered file
        descriptor.
        """
        raise NotImplementedError()

    def isDefaultHandler(self):
        """
        Override in a subclass to determine if the current SIGCHLD handler is
        SIG_DFL or not. Return True if it is SIG_DFL, False otherwise.
        """
        raise NotImplementedError()

    def pipe(self):
        """
        Create a non-blocking pipe which will be closed after the currently
        running test.
        """
        read, write = os.pipe()
        self.addCleanup(os.close, read)
        self.addCleanup(os.close, write)
        setNonBlocking(read)
        setNonBlocking(write)
        return read, write

    def setUp(self):
        """
        Save the current SIGCHLD handler as reported by L{signal.signal} and
        the current file descriptor registered with L{installHandler}.
        """
        handler = signal.getsignal(signal.SIGCHLD)
        if handler != signal.SIG_DFL:
            # Remember the non-default handler so tearDown can put it back.
            self.signalModuleHandler = handler
            signal.signal(signal.SIGCHLD, signal.SIG_DFL)
        else:
            self.signalModuleHandler = None
        self.oldFD = self.installHandler(-1)
        # A signal-module handler and an installed FD should never coexist;
        # treat that combination as misuse of the APIs under test.
        if self.signalModuleHandler is not None and self.oldFD != -1:
            msg("SIGCHLD setup issue: %r %r" % (self.signalModuleHandler, self.oldFD))
            raise RuntimeError("You used some signal APIs wrong! Try again.")

    def tearDown(self):
        """
        Restore whatever signal handler was present when setUp ran.
        """
        # If tests set up any kind of handlers, clear them out.
        self.installHandler(-1)
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
        # Now restore whatever the setup was before the test ran.
        if self.signalModuleHandler is not None:
            signal.signal(signal.SIGCHLD, self.signalModuleHandler)
        elif self.oldFD != -1:
            self.installHandler(self.oldFD)

    def test_isDefaultHandler(self):
        """
        L{isDefaultHandler} returns true if the SIGCHLD handler is SIG_DFL,
        false otherwise.
        """
        self.assertTrue(self.isDefaultHandler())
        signal.signal(signal.SIGCHLD, signal.SIG_IGN)
        self.assertFalse(self.isDefaultHandler())
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
        self.assertTrue(self.isDefaultHandler())
        signal.signal(signal.SIGCHLD, lambda *args: None)
        self.assertFalse(self.isDefaultHandler())

    def test_returnOldFD(self):
        """
        L{installHandler} returns the previously registered file descriptor.
        """
        read, write = self.pipe()
        oldFD = self.installHandler(write)
        self.assertEqual(self.installHandler(oldFD), write)

    def test_uninstallHandler(self):
        """
        C{installHandler(-1)} removes the SIGCHLD handler completely.
        """
        read, write = self.pipe()
        self.assertTrue(self.isDefaultHandler())
        self.installHandler(write)
        self.assertFalse(self.isDefaultHandler())
        self.installHandler(-1)
        self.assertTrue(self.isDefaultHandler())

    def test_installHandler(self):
        """
        The file descriptor passed to L{installHandler} has a byte written to
        it when SIGCHLD is delivered to the process.
        """
        read, write = self.pipe()
        self.installHandler(write)
        # Pipe is non-blocking and empty, so a read raises EAGAIN rather
        # than blocking.
        exc = self.assertRaises(OSError, os.read, read, 1)
        self.assertEqual(exc.errno, errno.EAGAIN)
        os.kill(os.getpid(), signal.SIGCHLD)
        self.assertEqual(len(os.read(read, 5)), 1)
class DefaultSIGCHLDTests(SIGCHLDTestsMixin, TestCase):
    """
    Tests for whatever implementation is selected for the L{installHandler}
    and L{isDefaultHandler} APIs.
    """
    # Exercise whichever implementation Twisted picked at import time.
    installHandler = staticmethod(installHandler)
    isDefaultHandler = staticmethod(isDefaultHandler)
class ExtensionSIGCHLDTests(SIGCHLDTestsMixin, TestCase):
    """
    Tests for the L{twisted.internet._sigchld} implementation of the
    L{installHandler} and L{isDefaultHandler} APIs.
    """

    # Skip (rather than fail) when the C extension was not built.
    try:
        import twisted.internet._sigchld
    except ImportError:
        skip = "twisted.internet._sigchld is not available"

    installHandler = _extInstallHandler
    isDefaultHandler = _extIsDefaultHandler
class SetWakeupSIGCHLDTests(SIGCHLDTestsMixin, TestCase):
    """
    Tests for the L{signal.set_wakeup_fd} implementation of the
    L{installHandler} and L{isDefaultHandler} APIs.
    """

    # Check both of these. On Ubuntu 9.10 (to take an example completely at
    # random), Python 2.5 has set_wakeup_fd but not siginterrupt.
    if (getattr(signal, 'set_wakeup_fd', None) is None
            or getattr(signal, 'siginterrupt', None) is None):
        skip = "signal.set_wakeup_fd is not available"

    installHandler = staticmethod(_installHandlerUsingSetWakeup)
    isDefaultHandler = staticmethod(_isDefaultHandler)
class PlainSignalModuleSIGCHLDTests(SIGCHLDTestsMixin, TestCase):
    """
    Tests for the L{signal.signal} implementation of the L{installHandler}
    and L{isDefaultHandler} APIs.
    """
    installHandler = staticmethod(_installHandlerUsingSignal)
    isDefaultHandler = staticmethod(_isDefaultHandler)
| apache-2.0 |
bill307/shadowsocks | shadowsocks/udprelay.py | 924 | 11154 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# SOCKS5 UDP Request
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
# SOCKS5 UDP Response
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
# shadowsocks UDP Request (before encrypted)
# +------+----------+----------+----------+
# | ATYP | DST.ADDR | DST.PORT | DATA |
# +------+----------+----------+----------+
# | 1 | Variable | 2 | Variable |
# +------+----------+----------+----------+
# shadowsocks UDP Response (before encrypted)
# +------+----------+----------+----------+
# | ATYP | DST.ADDR | DST.PORT | DATA |
# +------+----------+----------+----------+
# | 1 | Variable | 2 | Variable |
# +------+----------+----------+----------+
# shadowsocks UDP Request and Response (after encrypted)
# +-------+--------------+
# | IV | PAYLOAD |
# +-------+--------------+
# | Fixed | Variable |
# +-------+--------------+
# HOW TO NAME THINGS
# ------------------
# `dest` means destination server, which is from DST fields in the SOCKS5
# request
# `local` means local server of shadowsocks
# `remote` means remote server of shadowsocks
# `client` means UDP clients that connects to other servers
# `server` means the UDP server that handles user requests
from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import logging
import struct
import errno
import random
from shadowsocks import encrypt, eventloop, lru_cache, common, shell
from shadowsocks.common import parse_header, pack_addr
BUF_SIZE = 65536
def client_key(source_addr, server_af):
    """Build the cache key identifying one client/server-socket pairing.

    Note: ``server_af`` is the address family of the shadowsocks server
    socket, not of the final destination.
    """
    host, port = source_addr[0], source_addr[1]
    return '{0}:{1}:{2:d}'.format(host, port, server_af)
class UDPRelay(object):
    """UDP relay for shadowsocks.

    Runs either as the local (SOCKS5-facing) endpoint or as the remote
    server endpoint, depending on ``is_local``.
    """

    def __init__(self, config, dns_resolver, is_local, stat_callback=None):
        self._config = config
        if is_local:
            # sslocal: listen on the local address, forward to the server.
            self._listen_addr = config['local_address']
            self._listen_port = config['local_port']
            self._remote_addr = config['server']
            self._remote_port = config['server_port']
        else:
            # ssserver: listen on the server address; the destination comes
            # from each decrypted packet's header instead.
            self._listen_addr = config['server']
            self._listen_port = config['server_port']
            self._remote_addr = None
            self._remote_port = None
        self._dns_resolver = dns_resolver
        self._password = common.to_bytes(config['password'])
        self._method = config['method']
        self._timeout = config['timeout']
        self._is_local = is_local
        # Outbound client sockets, keyed by client_key(); idle entries are
        # evicted through _close_client.
        self._cache = lru_cache.LRUCache(timeout=config['timeout'],
                                         close_callback=self._close_client)
        self._client_fd_to_server_addr = \
            lru_cache.LRUCache(timeout=config['timeout'])
        self._dns_cache = lru_cache.LRUCache(timeout=300)
        self._eventloop = None
        self._closed = False
        # Filenos of client sockets registered with the event loop.
        self._sockets = set()
        if 'forbidden_ip' in config:
            self._forbidden_iplist = config['forbidden_ip']
        else:
            self._forbidden_iplist = None

        addrs = socket.getaddrinfo(self._listen_addr, self._listen_port, 0,
                                   socket.SOCK_DGRAM, socket.SOL_UDP)
        if len(addrs) == 0:
            raise Exception("can't get addrinfo for %s:%d" %
                            (self._listen_addr, self._listen_port))
        af, socktype, proto, canonname, sa = addrs[0]
        server_socket = socket.socket(af, socktype, proto)
        server_socket.bind((self._listen_addr, self._listen_port))
        server_socket.setblocking(False)
        self._server_socket = server_socket
        self._stat_callback = stat_callback

    def _get_a_server(self):
        """Pick the server address/port pair, choosing randomly when the
        config lists several."""
        server = self._config['server']
        server_port = self._config['server_port']
        if type(server_port) == list:
            server_port = random.choice(server_port)
        if type(server) == list:
            server = random.choice(server)
        logging.debug('chosen server: %s:%d', server, server_port)
        return server, server_port

    def _close_client(self, client):
        # LRU eviction callback: close real sockets, ignore bare addresses.
        if hasattr(client, 'close'):
            self._sockets.remove(client.fileno())
            self._eventloop.remove(client)
            client.close()
        else:
            # just an address
            pass

    def _handle_server(self):
        """Handle a datagram arriving on the listening socket."""
        server = self._server_socket
        data, r_addr = server.recvfrom(BUF_SIZE)
        if not data:
            # NOTE(review): no early return here — execution falls through
            # with empty data; confirm that is intended.
            logging.debug('UDP handle_server: data is empty')
        if self._stat_callback:
            self._stat_callback(self._listen_port, len(data))
        if self._is_local:
            # SOCKS5 UDP request: byte 2 is FRAG; fragmentation unsupported.
            frag = common.ord(data[2])
            if frag != 0:
                logging.warn('drop a message since frag is not 0')
                return
            else:
                data = data[3:]
        else:
            data = encrypt.encrypt_all(self._password, self._method, 0, data)
            # decrypt data
            if not data:
                logging.debug('UDP handle_server: data is empty after decrypt')
                return
        header_result = parse_header(data)
        if header_result is None:
            return
        addrtype, dest_addr, dest_port, header_length = header_result

        # Local side forwards to the shadowsocks server; the server side
        # forwards to the destination named in the header.
        if self._is_local:
            server_addr, server_port = self._get_a_server()
        else:
            server_addr, server_port = dest_addr, dest_port

        addrs = self._dns_cache.get(server_addr, None)
        if addrs is None:
            addrs = socket.getaddrinfo(server_addr, server_port, 0,
                                       socket.SOCK_DGRAM, socket.SOL_UDP)
            if not addrs:
                # drop
                return
            else:
                self._dns_cache[server_addr] = addrs

        af, socktype, proto, canonname, sa = addrs[0]
        key = client_key(r_addr, af)
        client = self._cache.get(key, None)
        if not client:
            # TODO async getaddrinfo
            if self._forbidden_iplist:
                if common.to_str(sa[0]) in self._forbidden_iplist:
                    logging.debug('IP %s is in forbidden list, drop' %
                                  common.to_str(sa[0]))
                    # drop
                    return
            # First packet for this (client, af) pair: open a dedicated
            # outbound socket and register it with the event loop.
            client = socket.socket(af, socktype, proto)
            client.setblocking(False)
            self._cache[key] = client
            self._client_fd_to_server_addr[client.fileno()] = r_addr
            self._sockets.add(client.fileno())
            self._eventloop.add(client, eventloop.POLL_IN, self)

        if self._is_local:
            # Encrypt before sending to the remote server.
            data = encrypt.encrypt_all(self._password, self._method, 1, data)
            if not data:
                return
        else:
            # Strip the shadowsocks header; send the raw payload onward.
            data = data[header_length:]
        if not data:
            return
        try:
            client.sendto(data, (server_addr, server_port))
        except IOError as e:
            err = eventloop.errno_from_exception(e)
            if err in (errno.EINPROGRESS, errno.EAGAIN):
                pass
            else:
                shell.print_exception(e)

    def _handle_client(self, sock):
        """Handle a reply arriving on one of the outbound client sockets and
        relay it back to the original requester."""
        data, r_addr = sock.recvfrom(BUF_SIZE)
        if not data:
            logging.debug('UDP handle_client: data is empty')
            return
        if self._stat_callback:
            self._stat_callback(self._listen_port, len(data))
        if not self._is_local:
            # Server side: prepend the source address header and encrypt.
            addrlen = len(r_addr[0])
            if addrlen > 255:
                # drop
                return
            data = pack_addr(r_addr[0]) + struct.pack('>H', r_addr[1]) + data
            response = encrypt.encrypt_all(self._password, self._method, 1,
                                           data)
            if not response:
                return
        else:
            # Local side: decrypt and wrap in a SOCKS5 UDP response
            # (RSV=0x0000, FRAG=0x00).
            data = encrypt.encrypt_all(self._password, self._method, 0,
                                       data)
            if not data:
                return
            header_result = parse_header(data)
            if header_result is None:
                return
            # addrtype, dest_addr, dest_port, header_length = header_result
            response = b'\x00\x00\x00' + data
        client_addr = self._client_fd_to_server_addr.get(sock.fileno())
        if client_addr:
            self._server_socket.sendto(response, client_addr)
        else:
            # this packet is from somewhere else we know
            # simply drop that packet
            pass

    def add_to_loop(self, loop):
        """Register the listening socket and the periodic sweep with *loop*."""
        if self._eventloop:
            raise Exception('already add to loop')
        if self._closed:
            raise Exception('already closed')
        self._eventloop = loop

        server_socket = self._server_socket
        self._eventloop.add(server_socket,
                            eventloop.POLL_IN | eventloop.POLL_ERR, self)
        loop.add_periodic(self.handle_periodic)

    def handle_event(self, sock, fd, event):
        # Dispatch: the listening socket feeds _handle_server, the
        # per-destination client sockets feed _handle_client.
        if sock == self._server_socket:
            if event & eventloop.POLL_ERR:
                logging.error('UDP server_socket err')
            self._handle_server()
        elif sock and (fd in self._sockets):
            if event & eventloop.POLL_ERR:
                logging.error('UDP client_socket err')
            self._handle_client(sock)

    def handle_periodic(self):
        if self._closed:
            if self._server_socket:
                self._server_socket.close()
                self._server_socket = None
            # NOTE(review): self._sockets holds file descriptors (ints added
            # via fileno()), and ints have no close(); presumably the LRU
            # sweep below closes the real sockets — confirm this branch.
            for sock in self._sockets:
                sock.close()
            logging.info('closed UDP port %d', self._listen_port)
        # Expire idle client sockets and their address mappings.
        self._cache.sweep()
        self._client_fd_to_server_addr.sweep()

    def close(self, next_tick=False):
        """Shut the relay down. With ``next_tick=True`` the actual cleanup
        is deferred to the next handle_periodic call."""
        logging.debug('UDP close')
        self._closed = True
        if not next_tick:
            if self._eventloop:
                self._eventloop.remove_periodic(self.handle_periodic)
                self._eventloop.remove(self._server_socket)
            self._server_socket.close()
            for client in list(self._cache.values()):
                client.close()
| apache-2.0 |
h3biomed/luigi | test/task_bulk_complete_test.py | 23 | 2415 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2016 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from helpers import unittest
from luigi import Task
from luigi import Parameter
from luigi.task import MixinNaiveBulkComplete
COMPLETE_TASKS = ["A", "B", "C"]
class MockTask(MixinNaiveBulkComplete, Task):
    """Task whose completeness is decided solely by ``param_a``."""
    param_a = Parameter()
    param_b = Parameter(default="Not Mandatory")

    def complete(self):
        # Complete iff the first parameter names a known-complete task.
        return self.param_a in COMPLETE_TASKS
class MixinNaiveBulkCompleteTest(unittest.TestCase):
    """
    Test that the MixinNaiveBulkComplete can handle
    input as
        - iterable of parameters (for single param tasks)
        - iterable of parameter tuples (for multi param tasks)
        - iterable of parameter dicts (for multi param tasks)
    """

    def test_single_arg_list(self):
        # Only parameters naming complete tasks should come back.
        single_arg_list = ["A", "B", "x"]
        expected_single_arg_list = set(
            [p for p in single_arg_list if p in COMPLETE_TASKS]
        )
        self.assertEqual(
            expected_single_arg_list,
            set(MockTask.bulk_complete(single_arg_list))
        )

    def test_multiple_arg_tuple(self):
        # The first tuple element maps to param_a and decides completeness.
        multiple_arg_tuple = (("A", "1"), ("B", "2"), ("X", "3"), ("C", "2"))
        expected_multiple_arg_tuple = set(
            [p for p in multiple_arg_tuple if p[0] in COMPLETE_TASKS]
        )
        self.assertEqual(
            expected_multiple_arg_tuple,
            set(MockTask.bulk_complete(multiple_arg_tuple))
        )

    def test_multiple_arg_dict(self):
        multiple_arg_dict = (
            {"param_a": "X", "param_b": "1"},
            {"param_a": "C", "param_b": "1"}
        )
        # NOTE(review): unlike the other tests this compares lists, so it is
        # order-sensitive — presumably bulk_complete preserves input order.
        expected_multiple_arg_dict = (
            [p for p in multiple_arg_dict if p["param_a"] in COMPLETE_TASKS]
        )
        self.assertEqual(
            expected_multiple_arg_dict,
            MockTask.bulk_complete(multiple_arg_dict)
        )
| apache-2.0 |
skipmodea1/plugin.video.xbmctorrent | resources/site-packages/xbmcswift2/constants.py | 34 | 1517 | '''
xbmcswift2.constants
--------------------
This module contains some helpful constants which ease interaction
with XBMC.
:copyright: (c) 2012 by Jonathan Beluch
:license: GPLv3, see LICENSE for more details.
'''
from xbmcswift2 import xbmcplugin
class SortMethod(object):
    '''Static class to hold all of the available sort methods. The
    sort methods are dynamically imported from xbmcplugin and added as
    attributes on this class. The prefix of 'SORT_METHOD_' is
    automatically stripped.

    e.g. SORT_METHOD_TITLE becomes SortMethod.TITLE
    '''

    @classmethod
    def from_string(cls, sort_method):
        '''Returns the sort method specified. sort_method is case insensitive.
        Will raise an AttributeError if the provided sort_method does not
        exist.

        >>> SortMethod.from_string('title')
        '''
        # Attributes are populated by the module-level loop after the class.
        return getattr(cls, sort_method.upper())
# Copy every xbmcplugin.SORT_METHOD_* constant onto SortMethod with the
# prefix stripped (e.g. SORT_METHOD_TITLE -> SortMethod.TITLE).
PREFIX = 'SORT_METHOD_'
for attr_name, attr_value in xbmcplugin.__dict__.items():
    if attr_name.startswith(PREFIX):
        setattr(SortMethod, attr_name[len(PREFIX):], attr_value)

# View mode ids pulled from skins
# Maps view-mode name -> {skin id: numeric view id for that skin}.
VIEW_MODES = {
    'thumbnail': {
        'skin.confluence': 500,
        'skin.aeon.nox': 551,
        'skin.confluence-vertical': 500,
        'skin.jx720': 52,
        'skin.pm3-hd': 53,
        'skin.rapier': 50,
        'skin.simplicity': 500,
        'skin.slik': 53,
        'skin.touched': 500,
        'skin.transparency': 53,
        'skin.xeebo': 55,
    },
}
| gpl-3.0 |
rembish/homebank-wui | setup.py | 1 | 1175 | #!/usr/bin/env python
try:
    from debian.changelog import Changelog
except ImportError:
    # Stub used when python-debian is unavailable: reports a dummy version
    # so setup.py still runs outside a Debian build environment.
    class Changelog(object):
        def __init__(self, _):
            pass

        def get_version(self):
            return '0.0.0'

from os import environ
from os.path import abspath, dirname, join
from setuptools import setup, find_packages

here = abspath(dirname(__file__))
changelog = join(here, 'debian/changelog')
requirements = open(join(here, 'requires.txt')).readlines()
dev_requirements = open(join(here, 'dev_requires.txt')).readlines()

additional = {}
# debhelper setup FAKEROOTKEY variable
# Outside a Debian package build: install the CLI entry point and include
# the development requirements too.
if 'FAKEROOTKEY' not in environ:
    additional['entry_points'] = {'console_scripts': [
        'homebank-cli = homebank.cli:manage'
    ]}
    requirements.extend(dev_requirements)

setup(
    name='homebank-wui',
    version=str(Changelog(open(changelog)).get_version()),
    description='Web User Interface for Homebank',
    author='Alex Rembish',
    author_email='alex@rembish.org',
    url='https://github.com/rembish/homebank-wui',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    install_requires=requirements,
    **additional)
| bsd-3-clause |
retomerz/intellij-community | python/helpers/pydev/pydevd_attach_to_process/winappdbg/plugins/do_exchain.py | 100 | 2307 | #!~/.wine/drive_c/Python25/python.exe
# -*- coding: utf-8 -*-
# Command line debugger using WinAppDbg
# Show exception handlers list
# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__revision__ = "$Id$"
from winappdbg import HexDump, Table
def do(self, arg):
    ".exchain - Show the SEH chain"
    # NOTE: this is a Python 2 module (print statements below).
    thread = self.get_thread_from_prefix()
    print "Exception handlers for thread %d" % thread.get_tid()
    print
    table = Table()
    table.addRow("Block", "Function")
    # Format each (handler block, handler function) address for the
    # thread's pointer width; None entries are printed as-is.
    bits = thread.get_bits()
    for (seh, seh_func) in thread.get_seh_chain():
        if seh is not None:
            seh = HexDump.address(seh, bits)
        if seh_func is not None:
            seh_func = HexDump.address(seh_func, bits)
        table.addRow(seh, seh_func)
    print table.getOutput()
| apache-2.0 |
SDSG-Invenio/invenio | invenio/legacy/websubmit/functions/Create_Cplx_Approval.py | 13 | 2580 | # This file is part of Invenio.
# Copyright (C) 2007, 2008, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
## Description: function Update_Approval_DB
## This function updates the approval database with the
## decision of the referee
## Author: T.Baron
## PARAMETERS: categformatDAM: variable used to compute the category
## of the document from its reference
import os
import re
from invenio.legacy.dbquery import run_sql
def Create_Cplx_Approval(parameters, curdir, form, user_info=None):
    """
    Record (or refresh) a complex approval request for the current record.

    Reads the document category from the 'combo<doctype>' file and the
    requesting group from the 'Group' file in ``curdir``, then inserts a
    'waiting' row into sbmCPLXAPPROVAL or resets an existing one.
    Returns '' in all cases.
    """
    # ``rn`` (the report number) is set elsewhere by the WebSubmit engine.
    global rn
    doctype = form['doctype']
    act = form['act']
    #categformat = parameters['categformatDAM']
    #Obtain the document category from combo<DOCTYPE> file
    category = ""
    if os.path.exists("%s/%s" % (curdir,'combo'+doctype)):
        fp = open("%s/%s" % (curdir,'combo'+doctype),"r")
        category = fp.read()
    else:
        return ""
    #Path of file containing group
    group_id = ""
    if os.path.exists("%s/%s" % (curdir,'Group')):
        fp = open("%s/%s" % (curdir,'Group'),"r")
        group = fp.read()
        # Normalise the group name: no slashes, no trailing newlines.
        group = group.replace("/","_")
        group = re.sub("[\n\r]+","",group)
        group_id = run_sql ("""SELECT id FROM usergroup WHERE name = %s""", (group,))[0][0]
    else:
        return ""
    sth = run_sql("SELECT rn FROM sbmCPLXAPPROVAL WHERE doctype=%s and categ=%s and rn=%s and type=%s and id_group=%s", (doctype,category,rn,act,group_id))
    if len(sth) == 0:
        # First request from this group: create a fresh 'waiting' entry.
        run_sql("INSERT INTO sbmCPLXAPPROVAL values(%s,%s,%s,%s,'waiting',%s,'','',NOW(),NOW(),'','','','','','')",(doctype,category,rn,act,group_id,))
    else:
        # Re-request: reset status and clear the project leader's decision.
        run_sql("UPDATE sbmCPLXAPPROVAL SET dLastReq=NOW(), status='waiting', dProjectLeaderAction='' WHERE doctype=%s and categ=%s and rn=%s and type=%s and id_group=%s", (doctype,category,rn,act,group_id))
    return ""
| gpl-2.0 |
jendap/tensorflow | tensorflow/contrib/constrained_optimization/__init__.py | 39 | 1769 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library for performing constrained optimization in TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.constrained_optimization.python.candidates import *
from tensorflow.contrib.constrained_optimization.python.constrained_minimization_problem import *
from tensorflow.contrib.constrained_optimization.python.constrained_optimizer import *
from tensorflow.contrib.constrained_optimization.python.external_regret_optimizer import *
from tensorflow.contrib.constrained_optimization.python.swap_regret_optimizer import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
# Public API of this package; every other name is stripped from the module
# namespace by remove_undocumented below.
_allowed_symbols = [
    "AdditiveExternalRegretOptimizer",
    "AdditiveSwapRegretOptimizer",
    "ConstrainedMinimizationProblem",
    "ConstrainedOptimizer",
    "find_best_candidate_distribution",
    "find_best_candidate_index",
    "MultiplicativeSwapRegretOptimizer",
]

remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
polimediaupv/edx-platform | lms/djangoapps/mobile_api/social_facebook/friends/views.py | 85 | 2439 | """
Views for friends info API
"""
from rest_framework import generics, status
from rest_framework.response import Response
from opaque_keys.edx.keys import CourseKey
from student.models import CourseEnrollment
from ...utils import mobile_view
from ..utils import get_friends_from_facebook, get_linked_edx_accounts, share_with_facebook_friends
from lms.djangoapps.mobile_api.social_facebook.friends import serializers
@mobile_view()
class FriendsInCourse(generics.ListAPIView):
    """
    **Use Case**

        API endpoint that returns all the users friends that are in the course specified.
        Note that only friends that allow their courses to be shared will be included.

    **Example request**:

        GET /api/mobile/v0.5/social/facebook/friends/course/<course_id>
        where course_id is in the form of /edX/DemoX/Demo_Course

    **Response Values**

        {
            "friends": [
                {
                    "name": "test",
                    "id": "12345",
                },
                ...
            ]
        }
    """
    serializer_class = serializers.FriendsInCourseSerializer

    def list(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.GET, files=request.FILES)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        # Get all the user's FB friends
        result = get_friends_from_facebook(serializer)
        if not isinstance(result, list):
            # Helper returned an error Response; pass it straight through.
            return result

        def is_member(friend, course_key):
            """
            Return true if friend is a member of the course specified by the course_key
            """
            return CourseEnrollment.objects.filter(
                course_id=course_key,
                user_id=friend['edX_id']
            ).count() == 1

        # For each friend check if they are a linked edX user
        friends_with_edx_users = get_linked_edx_accounts(result)
        # Filter by sharing preferences and enrollment in course
        course_key = CourseKey.from_string(kwargs['course_id'])
        friends_with_sharing_in_course = [
            {'id': friend['id'], 'name': friend['name']}
            for friend in friends_with_edx_users
            if share_with_facebook_friends(friend) and is_member(friend, course_key)
        ]
        return Response({'friends': friends_with_sharing_in_course})
| agpl-3.0 |
kalxas/QGIS | tests/src/python/test_qgslayout.py | 30 | 25611 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsLayout
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '18/07/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
import qgis # NOQA
from qgis.PyQt import sip
import tempfile
import shutil
import os
from qgis.core import (QgsUnitTypes,
QgsLayout,
QgsLayoutItemPage,
QgsLayoutGuide,
QgsLayoutObject,
QgsProject,
QgsPrintLayout,
QgsLayoutItemGroup,
QgsLayoutItem,
QgsLayoutItemHtml,
QgsProperty,
QgsLayoutPageCollection,
QgsLayoutMeasurement,
QgsLayoutFrame,
QgsFillSymbol,
QgsReadWriteContext,
QgsLayoutItemMap,
QgsLayoutItemLabel,
QgsLayoutSize,
QgsLayoutPoint)
from qgis.PyQt.QtCore import Qt, QCoreApplication, QEvent, QPointF, QRectF
from qgis.PyQt.QtTest import QSignalSpy
from qgis.PyQt.QtXml import QDomDocument
from qgis.testing import start_app, unittest
start_app()
class TestQgsLayout(unittest.TestCase):
@classmethod
def setUpClass(cls):
    """Run before all tests"""
    # Scratch directory for any files written during the tests.
    cls.basetestpath = tempfile.mkdtemp()
@classmethod
def tearDownClass(cls):
    """Run after all tests"""
    # Best-effort removal of the scratch directory (ignore_errors=True).
    shutil.rmtree(cls.basetestpath, True)
def testReadWriteXml(self):
    """Round-trip a QgsPrintLayout through writeXml/readXml and verify
    that name, units, pages, grid, guides, snapper and items survive."""
    p = QgsProject()
    l = QgsPrintLayout(p)
    l.setName('my layout')
    l.setUnits(QgsUnitTypes.LayoutInches)
    collection = l.pageCollection()

    # add a page
    page = QgsLayoutItemPage(l)
    page.setPageSize('A6')
    collection.addPage(page)

    grid = l.gridSettings()
    grid.setResolution(QgsLayoutMeasurement(5, QgsUnitTypes.LayoutPoints))

    g1 = QgsLayoutGuide(Qt.Horizontal, QgsLayoutMeasurement(5, QgsUnitTypes.LayoutCentimeters),
                        l.pageCollection().page(0))
    l.guides().addGuide(g1)

    snapper = l.snapper()
    snapper.setSnapTolerance(7)

    # add some items
    item1 = QgsLayoutItemMap(l)
    item1.setId('xxyyxx')
    l.addItem(item1)
    item2 = QgsLayoutItemMap(l)
    item2.setId('zzyyzz')
    l.addItem(item2)
    l.setReferenceMap(item2)

    doc = QDomDocument("testdoc")
    elem = l.writeXml(doc, QgsReadWriteContext())

    # Restore into a fresh layout and compare every stored property.
    l2 = QgsPrintLayout(p)
    self.assertTrue(l2.readXml(elem, doc, QgsReadWriteContext()))
    self.assertEqual(l2.name(), 'my layout')
    self.assertEqual(l2.units(), QgsUnitTypes.LayoutInches)

    collection2 = l2.pageCollection()
    self.assertEqual(collection2.pageCount(), 1)
    # A6 is 105 x 148 mm
    self.assertAlmostEqual(collection2.page(0).pageSize().width(), 105, 4)
    self.assertEqual(collection2.page(0).pageSize().height(), 148)
    self.assertEqual(l2.gridSettings().resolution().length(), 5.0)
    self.assertEqual(l2.gridSettings().resolution().units(), QgsUnitTypes.LayoutPoints)
    self.assertEqual(l2.guides().guidesOnPage(0)[0].orientation(), Qt.Horizontal)
    self.assertEqual(l2.guides().guidesOnPage(0)[0].position().length(), 5.0)
    self.assertEqual(l2.guides().guidesOnPage(0)[0].position().units(), QgsUnitTypes.LayoutCentimeters)
    self.assertEqual(l2.snapper().snapTolerance(), 7)

    # check restored items
    new_item1 = l2.itemByUuid(item1.uuid())
    self.assertTrue(new_item1)
    self.assertEqual(new_item1.id(), 'xxyyxx')
    new_item2 = l2.itemByUuid(item2.uuid())
    self.assertTrue(new_item2)
    self.assertEqual(new_item2.id(), 'zzyyzz')
    self.assertEqual(l2.referenceMap().id(), 'zzyyzz')
def testAddItemsFromXml(self):
    """Restore items from a serialised layout via addItemsFromXml.

    Covers plain restore, restore of a grouped pair, restore at an
    explicit position, and the paste-in-place mode on a multi-page
    layout.
    """
    p = QgsProject()
    l = QgsLayout(p)

    # add some items
    item1 = QgsLayoutItemLabel(l)
    item1.setId('xxyyxx')
    item1.attemptMove(QgsLayoutPoint(4, 8, QgsUnitTypes.LayoutMillimeters))
    item1.attemptResize(QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
    l.addItem(item1)
    item2 = QgsLayoutItemLabel(l)
    item2.setId('zzyyzz')
    item2.attemptMove(QgsLayoutPoint(1.4, 1.8, QgsUnitTypes.LayoutCentimeters))
    item2.attemptResize(QgsLayoutSize(2.8, 2.2, QgsUnitTypes.LayoutCentimeters))
    l.addItem(item2)

    doc = QDomDocument("testdoc")
    # store in xml
    elem = l.writeXml(doc, QgsReadWriteContext())

    l2 = QgsLayout(p)
    new_items = l2.addItemsFromXml(elem, doc, QgsReadWriteContext())
    self.assertEqual(len(new_items), 2)
    items = l2.items()
    self.assertTrue([i for i in items if i.id() == 'xxyyxx'])
    self.assertTrue([i for i in items if i.id() == 'zzyyzz'])
    self.assertTrue(new_items[0] in l2.items())
    self.assertTrue(new_items[1] in l2.items())
    new_item1 = [i for i in items if i.id() == 'xxyyxx'][0]
    new_item2 = [i for i in items if i.id() == 'zzyyzz'][0]
    # geometry must survive the round trip, including the original units
    self.assertEqual(new_item1.positionWithUnits(), QgsLayoutPoint(4, 8, QgsUnitTypes.LayoutMillimeters))
    self.assertEqual(new_item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
    self.assertEqual(new_item2.positionWithUnits(), QgsLayoutPoint(1.4, 1.8, QgsUnitTypes.LayoutCentimeters))
    self.assertEqual(new_item2.sizeWithUnits(), QgsLayoutSize(2.8, 2.2, QgsUnitTypes.LayoutCentimeters))

    # test with a group
    group = QgsLayoutItemGroup(l)
    group.addItem(item1)
    group.addItem(item2)
    l.addLayoutItem(group)
    elem = l.writeXml(doc, QgsReadWriteContext())

    l3 = QgsLayout(p)
    new_items = l3.addItemsFromXml(elem, doc, QgsReadWriteContext())
    self.assertEqual(len(new_items), 3)  # the two labels plus the group
    items = l3.items()
    self.assertTrue([i for i in items if i.id() == 'xxyyxx'])
    self.assertTrue([i for i in items if i.id() == 'zzyyzz'])
    self.assertTrue(new_items[0] in l3.items())
    self.assertTrue(new_items[1] in l3.items())
    self.assertTrue(new_items[2] in l3.items())
    # sip won't downcast automatically, so cast manually
    new_group = sip.cast(l3.itemByUuid(group.uuid()), QgsLayoutItemGroup)
    self.assertIsNotNone(new_group)
    # the restored group must contain exactly the restored non-group items
    other_items = [i for i in new_items if i.type() != new_group.type()]
    self.assertCountEqual(new_group.items(), other_items)

    # test restoring at a set position
    l3 = QgsLayout(p)
    new_items = l3.addItemsFromXml(elem, doc, QgsReadWriteContext(), QPointF(10, 30))
    self.assertEqual(len(new_items), 3)
    items = l3.items()
    new_item1 = [i for i in items if i.id() == 'xxyyxx'][0]
    new_item2 = [i for i in items if i.id() == 'zzyyzz'][0]
    # item1 lands at the given point; item2 keeps its 6 mm relative offset
    self.assertEqual(new_item1.positionWithUnits(), QgsLayoutPoint(10, 30, QgsUnitTypes.LayoutMillimeters))
    self.assertEqual(new_item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
    self.assertEqual(new_item2.positionWithUnits(), QgsLayoutPoint(2.0, 4.0, QgsUnitTypes.LayoutCentimeters))
    self.assertEqual(new_item2.sizeWithUnits(), QgsLayoutSize(2.8, 2.2, QgsUnitTypes.LayoutCentimeters))

    # paste in place: items keep their per-page position on the target page
    l4 = QgsLayout(p)
    page = QgsLayoutItemPage(l)
    page.setPageSize('A3')
    l4.pageCollection().addPage(page)
    page = QgsLayoutItemPage(l)
    page.setPageSize('A6')
    l4.pageCollection().addPage(page)
    new_items = l4.addItemsFromXml(elem, doc, QgsReadWriteContext(), QPointF(10, 30), True)
    self.assertEqual(len(new_items), 3)
    new_item1 = [i for i in new_items if i.id() == 'xxyyxx'][0]
    new_item2 = [i for i in new_items if i.id() == 'zzyyzz'][0]
    self.assertEqual(new_item1.pagePositionWithUnits(), QgsLayoutPoint(4, 8, QgsUnitTypes.LayoutMillimeters))
    self.assertEqual(new_item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
    self.assertEqual(new_item1.page(), 0)
    self.assertEqual(new_item2.pagePositionWithUnits(), QgsLayoutPoint(1.4, 1.8, QgsUnitTypes.LayoutCentimeters))
    self.assertEqual(new_item2.sizeWithUnits(), QgsLayoutSize(2.8, 2.2, QgsUnitTypes.LayoutCentimeters))
    self.assertEqual(new_item2.page(), 0)

    # paste in place, reference point on page 2
    new_items = l4.addItemsFromXml(elem, doc, QgsReadWriteContext(), QPointF(10, 550), True)
    self.assertEqual(len(new_items), 3)
    new_item1 = [i for i in new_items if i.id() == 'xxyyxx'][0]
    new_item2 = [i for i in new_items if i.id() == 'zzyyzz'][0]
    self.assertEqual(new_item1.pagePositionWithUnits(), QgsLayoutPoint(4, 8, QgsUnitTypes.LayoutMillimeters))
    self.assertEqual(new_item1.page(), 1)
    self.assertEqual(new_item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
    self.assertEqual(new_item2.pagePositionWithUnits(), QgsLayoutPoint(1.4, 1.8, QgsUnitTypes.LayoutCentimeters))
    self.assertEqual(new_item2.page(), 1)
    self.assertEqual(new_item2.sizeWithUnits(), QgsLayoutSize(2.8, 2.2, QgsUnitTypes.LayoutCentimeters))

    # TODO - test restoring multiframe
def testSaveLoadTemplate(self):
    """Save a layout as a .qpt template and restore it.

    Covers three load modes: appending into an empty layout, appending
    a second copy into an already-populated layout, and loading with
    the clear-existing flag set. Also checks that multiframes (HTML
    items and their frames) are restored and re-linked, and that every
    restored item receives a brand new uuid.

    Fix over the previous version: in the second-load uniqueness check
    the uuids of the newly restored items (new_items2) are now added to
    the tracking set; previously the already-tracked uuids of the first
    batch (new_items) were re-added, making those adds no-ops and
    leaving duplicates within the second batch undetected.
    """
    tmpfile = os.path.join(self.basetestpath, 'testTemplate.qpt')

    p = QgsProject()
    l = QgsLayout(p)
    l.initializeDefaults()

    # add some items
    item1 = QgsLayoutItemLabel(l)
    item1.setId('xxyyxx')
    item1.attemptMove(QgsLayoutPoint(4, 8, QgsUnitTypes.LayoutMillimeters))
    item1.attemptResize(QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
    l.addItem(item1)
    item2 = QgsLayoutItemLabel(l)
    item2.setId('zzyyzz')
    item2.attemptMove(QgsLayoutPoint(1.4, 1.8, QgsUnitTypes.LayoutCentimeters))
    item2.attemptResize(QgsLayoutSize(2.8, 2.2, QgsUnitTypes.LayoutCentimeters))
    l.addItem(item2)

    # two multiframes (HTML items), each with one frame
    multiframe1 = QgsLayoutItemHtml(l)
    multiframe1.setHtml('mf1')
    l.addMultiFrame(multiframe1)
    frame1 = QgsLayoutFrame(l, multiframe1)
    frame1.setId('frame1')
    frame1.attemptMove(QgsLayoutPoint(4, 8, QgsUnitTypes.LayoutMillimeters))
    frame1.attemptResize(QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
    multiframe1.addFrame(frame1)
    multiframe2 = QgsLayoutItemHtml(l)
    multiframe2.setHtml('mf2')
    l.addMultiFrame(multiframe2)
    frame2 = QgsLayoutFrame(l, multiframe2)
    frame2.setId('frame2')
    frame2.attemptMove(QgsLayoutPoint(1.4, 1.8, QgsUnitTypes.LayoutCentimeters))
    frame2.attemptResize(QgsLayoutSize(2.8, 2.2, QgsUnitTypes.LayoutCentimeters))
    multiframe2.addFrame(frame2)

    # every uuid seen so far; grown below as items are restored so each
    # restore can be checked for uuid uniqueness
    uuids = {item1.uuid(), item2.uuid(), frame1.uuid(), frame2.uuid(), multiframe1.uuid(), multiframe2.uuid()}

    self.assertTrue(l.saveAsTemplate(tmpfile, QgsReadWriteContext()))

    l2 = QgsLayout(p)
    with open(tmpfile) as f:
        template_content = f.read()
    doc = QDomDocument()
    doc.setContent(template_content)

    # adding to existing items
    new_items, ok = l2.loadFromTemplate(doc, QgsReadWriteContext(), False)
    self.assertTrue(ok)
    self.assertEqual(len(new_items), 4)
    items = l2.items()
    multiframes = l2.multiFrames()
    self.assertEqual(len(multiframes), 2)
    self.assertTrue([i for i in items if i.id() == 'xxyyxx'])
    self.assertTrue([i for i in items if i.id() == 'zzyyzz'])
    self.assertTrue([i for i in items if i.id() == 'frame1'])
    self.assertTrue([i for i in items if i.id() == 'frame2'])
    self.assertTrue([i for i in multiframes if i.html() == 'mf1'])
    self.assertTrue([i for i in multiframes if i.html() == 'mf2'])
    self.assertTrue(new_items[0] in l2.items())
    self.assertTrue(new_items[1] in l2.items())
    self.assertTrue(new_items[2] in l2.items())
    self.assertTrue(new_items[3] in l2.items())
    # double check that new items have a unique uuid
    self.assertNotIn(new_items[0].uuid(), uuids)
    uuids.add(new_items[0].uuid())
    self.assertNotIn(new_items[1].uuid(), uuids)
    uuids.add(new_items[1].uuid())
    self.assertNotIn(new_items[2].uuid(), uuids)
    uuids.add(new_items[2].uuid())
    self.assertNotIn(new_items[3].uuid(), uuids)
    uuids.add(new_items[3].uuid())
    self.assertNotIn(multiframes[0].uuid(), [multiframe1.uuid(), multiframe2.uuid()])
    self.assertNotIn(multiframes[1].uuid(), [multiframe1.uuid(), multiframe2.uuid()])

    # restored multiframes belong to the new layout, and frames re-link
    new_multiframe1 = [i for i in multiframes if i.html() == 'mf1'][0]
    self.assertEqual(new_multiframe1.layout(), l2)
    new_multiframe2 = [i for i in multiframes if i.html() == 'mf2'][0]
    self.assertEqual(new_multiframe2.layout(), l2)
    new_frame1 = sip.cast([i for i in items if i.id() == 'frame1'][0], QgsLayoutFrame)
    new_frame2 = sip.cast([i for i in items if i.id() == 'frame2'][0], QgsLayoutFrame)
    self.assertEqual(new_frame1.multiFrame(), new_multiframe1)
    self.assertEqual(new_multiframe1.frames()[0].uuid(), new_frame1.uuid())
    self.assertEqual(new_frame2.multiFrame(), new_multiframe2)
    self.assertEqual(new_multiframe2.frames()[0].uuid(), new_frame2.uuid())

    # adding to existing items a second time
    new_items2, ok = l2.loadFromTemplate(doc, QgsReadWriteContext(), False)
    self.assertTrue(ok)
    self.assertEqual(len(new_items2), 4)
    items = l2.items()
    self.assertEqual(len(items), 8)
    multiframes2 = l2.multiFrames()
    self.assertEqual(len(multiframes2), 4)
    # isolate the multiframes created by the second load
    multiframes2 = [m for m in l2.multiFrames() if not m.uuid() in [new_multiframe1.uuid(), new_multiframe2.uuid()]]
    self.assertEqual(len(multiframes2), 2)
    self.assertTrue([i for i in items if i.id() == 'xxyyxx'])
    self.assertTrue([i for i in items if i.id() == 'zzyyzz'])
    self.assertTrue([i for i in items if i.id() == 'frame1'])
    self.assertTrue([i for i in items if i.id() == 'frame2'])
    self.assertTrue([i for i in multiframes2 if i.html() == 'mf1'])
    self.assertTrue([i for i in multiframes2 if i.html() == 'mf2'])
    self.assertTrue(new_items[0] in l2.items())
    self.assertTrue(new_items[1] in l2.items())
    self.assertTrue(new_items[2] in l2.items())
    self.assertTrue(new_items[3] in l2.items())
    self.assertTrue(new_items2[0] in l2.items())
    self.assertTrue(new_items2[1] in l2.items())
    self.assertTrue(new_items2[2] in l2.items())
    self.assertTrue(new_items2[3] in l2.items())
    # track the *second* batch's uuids (previously new_items was re-added
    # here by mistake, so duplicates within new_items2 went unnoticed)
    self.assertNotIn(new_items2[0].uuid(), uuids)
    uuids.add(new_items2[0].uuid())
    self.assertNotIn(new_items2[1].uuid(), uuids)
    uuids.add(new_items2[1].uuid())
    self.assertNotIn(new_items2[2].uuid(), uuids)
    uuids.add(new_items2[2].uuid())
    self.assertNotIn(new_items2[3].uuid(), uuids)
    uuids.add(new_items2[3].uuid())
    self.assertNotIn(multiframes2[0].uuid(),
                     [multiframe1.uuid(), multiframe2.uuid(), new_multiframe1.uuid(), new_multiframe2.uuid()])
    self.assertNotIn(multiframes2[1].uuid(),
                     [multiframe1.uuid(), multiframe2.uuid(), new_multiframe1.uuid(), new_multiframe2.uuid()])
    new_multiframe1b = [i for i in multiframes2 if i.html() == 'mf1'][0]
    self.assertEqual(new_multiframe1b.layout(), l2)
    new_multiframe2b = [i for i in multiframes2 if i.html() == 'mf2'][0]
    self.assertEqual(new_multiframe2b.layout(), l2)
    new_frame1b = sip.cast([i for i in items if i.id() == 'frame1' and i.uuid() != new_frame1.uuid()][0],
                           QgsLayoutFrame)
    new_frame2b = sip.cast([i for i in items if i.id() == 'frame2' and i.uuid() != new_frame2.uuid()][0],
                           QgsLayoutFrame)
    self.assertEqual(new_frame1b.multiFrame(), new_multiframe1b)
    self.assertEqual(new_multiframe1b.frames()[0].uuid(), new_frame1b.uuid())
    self.assertEqual(new_frame2b.multiFrame(), new_multiframe2b)
    self.assertEqual(new_multiframe2b.frames()[0].uuid(), new_frame2b.uuid())

    # clearing existing items
    new_items3, ok = l2.loadFromTemplate(doc, QgsReadWriteContext(), True)
    new_multiframes = l2.multiFrames()
    self.assertTrue(ok)
    self.assertEqual(len(new_items3), 5)  # includes page
    self.assertEqual(len(new_multiframes), 2)
    items = l2.items()
    self.assertTrue([i for i in items if isinstance(i, QgsLayoutItem) and i.id() == 'xxyyxx'])
    self.assertTrue([i for i in items if isinstance(i, QgsLayoutItem) and i.id() == 'zzyyzz'])
    self.assertTrue([i for i in items if isinstance(i, QgsLayoutItem) and i.id() == 'frame1'])
    self.assertTrue([i for i in items if isinstance(i, QgsLayoutItem) and i.id() == 'frame2'])
    self.assertTrue(new_items3[0] in l2.items())
    self.assertTrue(new_items3[1] in l2.items())
    self.assertTrue(new_items3[2] in l2.items())
    self.assertTrue(new_items3[3] in l2.items())
    new_multiframe1 = [i for i in new_multiframes if i.html() == 'mf1'][0]
    new_multiframe2 = [i for i in new_multiframes if i.html() == 'mf2'][0]
    new_frame1 = sip.cast([i for i in items if isinstance(i, QgsLayoutItem) and i.id() == 'frame1'][0],
                          QgsLayoutFrame)
    new_frame2 = sip.cast([i for i in items if isinstance(i, QgsLayoutItem) and i.id() == 'frame2'][0],
                          QgsLayoutFrame)
    self.assertEqual(new_frame1.multiFrame(), new_multiframe1)
    self.assertEqual(new_multiframe1.frames()[0].uuid(), new_frame1.uuid())
    self.assertEqual(new_frame2.multiFrame(), new_multiframe2)
    self.assertEqual(new_multiframe2.frames()[0].uuid(), new_frame2.uuid())
def testSelectedItems(self):
    """selectedLayoutItems reporting, including the locked-item filter."""
    project = QgsProject()
    layout = QgsLayout(project)

    # three map items to select one by one
    map_a = QgsLayoutItemMap(layout)
    layout.addItem(map_a)
    map_b = QgsLayoutItemMap(layout)
    layout.addItem(map_b)
    map_c = QgsLayoutItemMap(layout)
    layout.addItem(map_c)

    # nothing selected initially
    self.assertFalse(layout.selectedLayoutItems())

    # select items one at a time; the reported set grows accordingly
    currently_selected = []
    for item in (map_a, map_b, map_c):
        item.setSelected(True)
        currently_selected.append(item)
        self.assertEqual(set(layout.selectedLayoutItems()), set(currently_selected))

    # locking an item hides it from selectedLayoutItems(False) only
    map_c.setLocked(True)
    self.assertEqual(set(layout.selectedLayoutItems(False)), {map_a, map_b})
    self.assertEqual(set(layout.selectedLayoutItems(True)), {map_a, map_b, map_c})
def testSelections(self):
    """setSelectedItem/deselectAll behaviour and the selectedItemChanged signal."""
    p = QgsProject()
    l = QgsLayout(p)

    # add some items
    item1 = QgsLayoutItemMap(l)
    l.addItem(item1)
    item2 = QgsLayoutItemMap(l)
    l.addItem(item2)
    item3 = QgsLayoutItemMap(l)
    l.addItem(item3)

    select_changed_spy = QSignalSpy(l.selectedItemChanged)

    # clearing an (already empty) selection still emits the signal with None
    l.setSelectedItem(None)
    self.assertFalse(l.selectedLayoutItems())
    self.assertEqual(len(select_changed_spy), 1)
    self.assertEqual(select_changed_spy[-1][0], None)

    # selecting an item replaces the selection and emits the item
    l.setSelectedItem(item1)
    self.assertEqual(l.selectedLayoutItems(), [item1])
    self.assertEqual(len(select_changed_spy), 2)
    self.assertEqual(select_changed_spy[-1][0], item1)

    l.setSelectedItem(None)
    self.assertFalse(l.selectedLayoutItems())
    self.assertEqual(len(select_changed_spy), 3)
    self.assertEqual(select_changed_spy[-1][0], None)

    l.setSelectedItem(item2)
    self.assertEqual(l.selectedLayoutItems(), [item2])
    self.assertEqual(len(select_changed_spy), 4)
    self.assertEqual(select_changed_spy[-1][0], item2)

    # deselectAll behaves like setSelectedItem(None)
    l.deselectAll()
    self.assertFalse(l.selectedLayoutItems())
    self.assertEqual(len(select_changed_spy), 5)
    self.assertEqual(select_changed_spy[-1][0], None)
def testLayoutItemAt(self):
    """layoutItemAt hit-testing: top-most item wins, with options to
    skip locked items and to continue the search below a given item."""
    p = QgsProject()
    l = QgsLayout(p)

    # three overlapping 18x12 mm maps, each offset by (2, 2) mm;
    # later additions stack on top
    item1 = QgsLayoutItemMap(l)
    item1.attemptMove(QgsLayoutPoint(4, 8, QgsUnitTypes.LayoutMillimeters))
    item1.attemptResize(QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
    l.addItem(item1)
    item2 = QgsLayoutItemMap(l)
    item2.attemptMove(QgsLayoutPoint(6, 10, QgsUnitTypes.LayoutMillimeters))
    item2.attemptResize(QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
    l.addItem(item2)
    item3 = QgsLayoutItemMap(l)
    item3.attemptMove(QgsLayoutPoint(8, 12, QgsUnitTypes.LayoutMillimeters))
    item3.attemptResize(QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
    item3.setLocked(True)
    l.addItem(item3)

    # points outside every item
    self.assertIsNone(l.layoutItemAt(QPointF(0, 0)))
    self.assertIsNone(l.layoutItemAt(QPointF(100, 100)))
    # (5, 9) is covered only by item1
    self.assertEqual(l.layoutItemAt(QPointF(5, 9)), item1)
    # (25, 23) is covered only by the locked item3
    self.assertEqual(l.layoutItemAt(QPointF(25, 23)), item3)
    # second argument True skips locked items
    self.assertIsNone(l.layoutItemAt(QPointF(25, 23), True))
    self.assertEqual(l.layoutItemAt(QPointF(7, 11)), item2)
    # (9, 13) is covered by all three; item3 is on top
    self.assertEqual(l.layoutItemAt(QPointF(9, 13)), item3)
    self.assertEqual(l.layoutItemAt(QPointF(9, 13), True), item2)
    # passing an item continues the search below that item in the stack
    self.assertEqual(l.layoutItemAt(QPointF(9, 13), item3), item2)
    self.assertEqual(l.layoutItemAt(QPointF(9, 13), item2), item1)
    self.assertIsNone(l.layoutItemAt(QPointF(9, 13), item1))
    item2.setLocked(True)
    # combined: start below item3 and skip the (now locked) item2
    self.assertEqual(l.layoutItemAt(QPointF(9, 13), item3, True), item1)
def testStacking(self):
    """Exercise raiseItem/lowerItem/moveItemToTop/moveItemToBottom.

    Identical checks to the straight-line original; a local helper
    keeps the repeated z-order assertions compact.
    """
    project = QgsProject()
    layout = QgsLayout(project)

    # add some items; insertion order gives z-values 1, 2, 3
    item1 = QgsLayoutItemMap(layout)
    layout.addLayoutItem(item1)
    item2 = QgsLayoutItemMap(layout)
    layout.addLayoutItem(item2)
    item3 = QgsLayoutItemMap(layout)
    layout.addLayoutItem(item3)

    def assert_z_order(z1, z2, z3):
        # check all three items' z-values in one call
        self.assertEqual(item1.zValue(), z1)
        self.assertEqual(item2.zValue(), z2)
        self.assertEqual(item3.zValue(), z3)

    assert_z_order(1, 2, 3)

    # operations on no item are no-ops
    self.assertFalse(layout.raiseItem(None))
    self.assertFalse(layout.lowerItem(None))
    self.assertFalse(layout.moveItemToTop(None))
    self.assertFalse(layout.moveItemToBottom(None))
    assert_z_order(1, 2, 3)

    # raising
    self.assertFalse(layout.raiseItem(item3))
    assert_z_order(1, 2, 3)
    self.assertTrue(layout.raiseItem(item2))
    assert_z_order(1, 3, 2)
    self.assertFalse(layout.raiseItem(item2))
    assert_z_order(1, 3, 2)
    self.assertTrue(layout.raiseItem(item1))
    assert_z_order(2, 3, 1)

    # lowering
    self.assertFalse(layout.lowerItem(item3))
    assert_z_order(2, 3, 1)
    self.assertTrue(layout.lowerItem(item2))
    assert_z_order(3, 2, 1)
    self.assertTrue(layout.lowerItem(item2))
    assert_z_order(3, 1, 2)

    # raise to top
    self.assertFalse(layout.moveItemToTop(item1))
    assert_z_order(3, 1, 2)
    self.assertTrue(layout.moveItemToTop(item3))
    assert_z_order(2, 1, 3)
    self.assertTrue(layout.moveItemToTop(item2))
    assert_z_order(1, 3, 2)

    # move to bottom
    self.assertFalse(layout.moveItemToBottom(item1))
    assert_z_order(1, 3, 2)
    self.assertTrue(layout.moveItemToBottom(item3))
    assert_z_order(2, 3, 1)
    self.assertTrue(layout.moveItemToBottom(item2))
    assert_z_order(3, 1, 2)
# Run this test module's tests when executed directly.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
ajayuranakar/django-blog | lib/python2.7/site-packages/django/contrib/gis/db/backends/oracle/schema.py | 608 | 4050 | from django.contrib.gis.db.models.fields import GeometryField
from django.db.backends.oracle.schema import DatabaseSchemaEditor
from django.db.backends.utils import truncate_name
class OracleGISSchemaEditor(DatabaseSchemaEditor):
    """Oracle Spatial-aware schema editor.

    For each GeometryField, queues the extra DDL Oracle Spatial needs
    (a USER_SDO_GEOM_METADATA row describing the column's extent,
    tolerance and SRID, plus an optional spatial index) and runs it
    after the owning table has been created or altered.
    """

    # Registers a geometry column's dimension info and SRID with Oracle.
    sql_add_geometry_metadata = ("""
INSERT INTO USER_SDO_GEOM_METADATA
    ("TABLE_NAME", "COLUMN_NAME", "DIMINFO", "SRID")
VALUES (
    %(table)s,
    %(column)s,
    MDSYS.SDO_DIM_ARRAY(
        MDSYS.SDO_DIM_ELEMENT('LONG', %(dim0)s, %(dim2)s, %(tolerance)s),
        MDSYS.SDO_DIM_ELEMENT('LAT', %(dim1)s, %(dim3)s, %(tolerance)s)
    ),
    %(srid)s
)""")
    sql_add_spatial_index = 'CREATE INDEX %(index)s ON %(table)s(%(column)s) INDEXTYPE IS MDSYS.SPATIAL_INDEX'
    sql_drop_spatial_index = 'DROP INDEX %(index)s'
    sql_clear_geometry_table_metadata = 'DELETE FROM USER_SDO_GEOM_METADATA WHERE TABLE_NAME = %(table)s'
    sql_clear_geometry_field_metadata = (
        'DELETE FROM USER_SDO_GEOM_METADATA WHERE TABLE_NAME = %(table)s '
        'AND COLUMN_NAME = %(column)s'
    )

    def __init__(self, *args, **kwargs):
        super(OracleGISSchemaEditor, self).__init__(*args, **kwargs)
        # Geometry DDL queued by column_sql() and flushed by
        # run_geometry_sql() once the owning table exists.
        self.geometry_sql = []

    def geo_quote_name(self, name):
        """Quote a name for use in Oracle Spatial metadata statements."""
        return self.connection.ops.geo_quote_name(name)

    def column_sql(self, model, field, include_default=False):
        """Return the base column SQL; for geometry fields, additionally
        queue the metadata INSERT and (if requested) the spatial index."""
        column_sql = super(OracleGISSchemaEditor, self).column_sql(model, field, include_default)
        if isinstance(field, GeometryField):
            db_table = model._meta.db_table
            self.geometry_sql.append(
                self.sql_add_geometry_metadata % {
                    'table': self.geo_quote_name(db_table),
                    'column': self.geo_quote_name(field.column),
                    'dim0': field._extent[0],
                    'dim1': field._extent[1],
                    'dim2': field._extent[2],
                    'dim3': field._extent[3],
                    'tolerance': field._tolerance,
                    'srid': field.srid,
                }
            )
            if field.spatial_index:
                self.geometry_sql.append(
                    self.sql_add_spatial_index % {
                        'index': self.quote_name(self._create_spatial_index_name(model, field)),
                        'table': self.quote_name(db_table),
                        'column': self.quote_name(field.column),
                    }
                )
        return column_sql

    def create_model(self, model):
        """Create the table, then flush any queued geometry DDL."""
        super(OracleGISSchemaEditor, self).create_model(model)
        self.run_geometry_sql()

    def delete_model(self, model):
        """Drop the table and its rows in USER_SDO_GEOM_METADATA."""
        super(OracleGISSchemaEditor, self).delete_model(model)
        self.execute(self.sql_clear_geometry_table_metadata % {
            'table': self.geo_quote_name(model._meta.db_table),
        })

    def add_field(self, model, field):
        """Add the column, then flush any queued geometry DDL."""
        super(OracleGISSchemaEditor, self).add_field(model, field)
        self.run_geometry_sql()

    def remove_field(self, model, field):
        """For geometry fields, clear metadata (and drop the spatial
        index) before removing the column itself."""
        if isinstance(field, GeometryField):
            self.execute(self.sql_clear_geometry_field_metadata % {
                'table': self.geo_quote_name(model._meta.db_table),
                'column': self.geo_quote_name(field.column),
            })
            if field.spatial_index:
                self.execute(self.sql_drop_spatial_index % {
                    'index': self.quote_name(self._create_spatial_index_name(model, field)),
                })
        super(OracleGISSchemaEditor, self).remove_field(model, field)

    def run_geometry_sql(self):
        """Execute and then clear all queued geometry DDL statements."""
        for sql in self.geometry_sql:
            self.execute(sql)
        self.geometry_sql = []

    def _create_spatial_index_name(self, model, field):
        # Oracle doesn't allow object names > 30 characters. Use this scheme
        # instead of self._create_index_name() for backwards compatibility.
        return truncate_name('%s_%s_id' % (model._meta.db_table, field.column), 30)
| gpl-3.0 |
Spoken-tutorial/spoken-website | spoken/forms.py | 1 | 13015 | # Third Party Stuff
from builtins import str
from builtins import object
from django import forms
from django.db.models import Count, Q
from django.core.exceptions import ValidationError
# Spoken Tutorial Stuff
from creation.models import TutorialResource, FossCategory
from events.models import Testimonials, InductionInterest, MediaTestimonials
class KeywordSearchForm(forms.Form):
    """Single mandatory free-text query field used for keyword search."""
    q = forms.CharField(required=True)
class AllTutorialSearchForm(forms.Form):
    """Filter form over every published tutorial (all courses/languages).

    Both dropdowns are populated at instantiation time from the current
    TutorialResource contents, each choice labelled with its tutorial
    count, e.g. ``Python (42)``.
    """

    search_foss = forms.ChoiceField(
        choices=[],
        widget=forms.Select(),
        required=False,
    )
    search_language = forms.ChoiceField(
        choices=[],
        widget=forms.Select(),
        required=False,
    )

    def __init__(self, *args, **kwargs):
        super(AllTutorialSearchForm, self).__init__(*args, **kwargs)
        published = Q(status=1) | Q(status=2)
        # Distinct (course, tutorial count) pairs; counted against the
        # English resources only so each tutorial is counted once.
        foss_counts = TutorialResource.objects.filter(
            published, language__name='English'
        ).values('tutorial_detail__foss__foss').annotate(Count('id')).order_by(
            'tutorial_detail__foss__foss'
        ).values_list('tutorial_detail__foss__foss', 'id__count').distinct()
        # Distinct (language, tutorial count) pairs across all languages.
        lang_counts = TutorialResource.objects.filter(published).values(
            'language__name'
        ).annotate(Count('id')).order_by('language').values_list(
            'language__name', 'id__count'
        ).distinct()
        self.fields['search_foss'].choices = [('', '-- All Courses --')] + [
            (str(name), str(name) + ' (' + str(count) + ')') for name, count in foss_counts
        ]
        self.fields['search_language'].choices = [('', '-- All Languages --')] + [
            (str(name), str(name) + ' (' + str(count) + ')') for name, count in lang_counts
        ]
class TutorialSearchForm(forms.Form):
    """Filter form over published tutorials shown on the homepage
    (``show_on_homepage=1``); choices carry per-course/language counts."""

    search_foss = forms.ChoiceField(
        choices=[],
        widget=forms.Select(),
        required=False,
    )
    search_language = forms.ChoiceField(
        choices=[],
        widget=forms.Select(),
        required=False,
    )

    def __init__(self, *args, **kwargs):
        super(TutorialSearchForm, self).__init__(*args, **kwargs)
        published = Q(status=1) | Q(status=2)
        # Course counts from English resources so each tutorial counts once.
        foss_counts = TutorialResource.objects.filter(
            published, language__name='English', tutorial_detail__foss__show_on_homepage=1
        ).values('tutorial_detail__foss__foss').annotate(Count('id')).order_by(
            'tutorial_detail__foss__foss'
        ).values_list('tutorial_detail__foss__foss', 'id__count').distinct()
        lang_counts = TutorialResource.objects.filter(
            published, tutorial_detail__foss__show_on_homepage=1
        ).values('language__name').annotate(Count('id')).order_by(
            'language'
        ).values_list('language__name', 'id__count').distinct()
        self.fields['search_foss'].choices = [('', '-- All Courses --')] + [
            (str(name), str(name) + ' (' + str(count) + ')') for name, count in foss_counts
        ]
        self.fields['search_language'].choices = [('', '-- All Languages --')] + [
            (str(name), str(name) + ' (' + str(count) + ')') for name, count in lang_counts
        ]
class SeriesTutorialSearchForm(forms.Form):
    """Filter form over published series tutorials that are *not* shown
    on the homepage (``show_on_homepage=0``)."""

    search_otherfoss = forms.ChoiceField(
        choices=[],
        widget=forms.Select(),
        required=False,
    )
    search_otherlanguage = forms.ChoiceField(
        choices=[],
        widget=forms.Select(),
        required=False,
    )

    def __init__(self, *args, **kwargs):
        super(SeriesTutorialSearchForm, self).__init__(*args, **kwargs)
        published = Q(status=1) | Q(status=2)
        # Course counts from English resources so each tutorial counts once.
        foss_counts = TutorialResource.objects.filter(
            published, language__name='English', tutorial_detail__foss__show_on_homepage=0
        ).values('tutorial_detail__foss__foss').annotate(Count('id')).order_by(
            'tutorial_detail__foss__foss'
        ).values_list('tutorial_detail__foss__foss', 'id__count').distinct()
        lang_counts = TutorialResource.objects.filter(
            published, tutorial_detail__foss__show_on_homepage=0
        ).values('language__name').annotate(Count('id')).order_by(
            'language'
        ).values_list('language__name', 'id__count').distinct()
        self.fields['search_otherfoss'].choices = [('', '-- All Courses --')] + [
            (str(name), str(name) + ' (' + str(count) + ')') for name, count in foss_counts
        ]
        self.fields['search_otherlanguage'].choices = [('', '-- All Languages --')] + [
            (str(name), str(name) + ' (' + str(count) + ')') for name, count in lang_counts
        ]
class ArchivedTutorialSearchForm(forms.Form):
    """Filter form over published archived tutorials
    (``show_on_homepage=2``)."""

    search_archivedfoss = forms.ChoiceField(
        choices=[],
        widget=forms.Select(),
        required=False,
    )
    search_archivedlanguage = forms.ChoiceField(
        choices=[],
        widget=forms.Select(),
        required=False,
    )

    def __init__(self, *args, **kwargs):
        super(ArchivedTutorialSearchForm, self).__init__(*args, **kwargs)
        published = Q(status=1) | Q(status=2)
        # Course counts from English resources so each tutorial counts once.
        foss_counts = TutorialResource.objects.filter(
            published, language__name='English', tutorial_detail__foss__show_on_homepage=2
        ).values('tutorial_detail__foss__foss').annotate(Count('id')).order_by(
            'tutorial_detail__foss__foss'
        ).values_list('tutorial_detail__foss__foss', 'id__count').distinct()
        lang_counts = TutorialResource.objects.filter(
            published, tutorial_detail__foss__show_on_homepage=2
        ).values('language__name').annotate(Count('id')).order_by(
            'language'
        ).values_list('language__name', 'id__count').distinct()
        self.fields['search_archivedfoss'].choices = [('', '-- All Courses --')] + [
            (str(name), str(name) + ' (' + str(count) + ')') for name, count in foss_counts
        ]
        self.fields['search_archivedlanguage'].choices = [('', '-- All Languages --')] + [
            (str(name), str(name) + ' (' + str(count) + ')') for name, count in lang_counts
        ]
class TestimonialsForm(forms.ModelForm):
    """ModelForm for submitting a written testimonial.

    The extra fields are all optional; approved_by and user are
    excluded because they are assigned in the view, not by the
    submitter.
    """
    source_title = forms.CharField(required=False)
    source_link = forms.CharField(required=False)
    # optional scanned copy of the testimonial document
    scan_copy = forms.FileField(label='Select a Scaned copy', required=False)
    status = forms.BooleanField(required=False)

    class Meta(object):
        model = Testimonials
        exclude = ['approved_by', 'user']
def file_size(value):
    """Validator: reject uploaded files larger than 50 MiB.

    :param value: an uploaded-file object exposing a ``size`` attribute
        in bytes.
    :raises ValidationError: when the file exceeds the limit.
    """
    max_bytes = 50 * 1024 * 1024  # 50 MiB == 52428800 bytes
    if value.size > max_bytes:
        raise ValidationError('File too large. Size should not exceed 50 MiB.')
class MediaTestimonialForm(forms.Form):
    """Form collecting an audio/video testimonial for a FOSS course,
    saved into the MediaTestimonials table.

    Fixes over the previous version (see clean()):
    * the extension check is now case-insensitive;
    * clean() returns the full cleaned_data mapping as Django requires,
      instead of just the uploaded file object (the old return value
      replaced self.cleaned_data wholesale).
    """

    def __init__(self, *args, **kwargs):
        # 'on_home_page' selects which FossCategory rows populate the
        # course dropdown (homepage vs. series courses).
        on_home_page = kwargs.pop('on_home_page')
        super(MediaTestimonialForm, self).__init__(*args, **kwargs)
        foss_list_choices = [('', '-- All Courses --'), ]
        foss_list = FossCategory.objects.filter(status=1, show_on_homepage=on_home_page).values('foss').annotate(
            Count('id')).order_by('foss').values_list('foss').distinct()
        for foss_row in foss_list:
            foss_list_choices.append((str(foss_row[0]), str(foss_row[0])))
        self.fields['foss'].choices = foss_list_choices
        # Bootstrap styling hooks for the rendered widgets.
        self.fields['foss'].widget.attrs['class'] = 'form-control'
        self.fields['media'].widget.attrs['class'] = 'form-control'
        self.fields['media'].widget.attrs['id'] = 'media_element'
        self.fields['name'].widget.attrs['class'] = 'form-control'
        self.fields['workshop_details'].widget.attrs['class'] = 'form-control'
        self.fields['content'].widget.attrs['class'] = 'form-control'

    foss = forms.ChoiceField(
        choices=[],
        widget=forms.Select()
    )
    name = forms.CharField(label='Name', required=True)
    workshop_details = forms.CharField(label='Workshop Details (Workshop name, venue, Date | e.g. Spoken Workshop, IIT Bombay, 26 January 2018)', required=True)
    media = forms.FileField(label='File(Select an mp4/mov/mp3 file less than 50MB)', required=True)
    content = forms.CharField(label='Short Description', widget=forms.Textarea, required=True,
                              max_length=500)

    def clean(self):
        """Validate the uploaded media file and return cleaned_data.

        :raises ValidationError: when no media file was supplied.
        """
        if 'media' not in self.cleaned_data:
            raise ValidationError({'media': ['No file or empty file given', ]})
        super(MediaTestimonialForm, self).clean()
        formats = ('mp4', 'mp3', 'mov')
        # Compare case-insensitively so e.g. 'clip.MP4' is accepted too.
        if not self.cleaned_data['media'].name.lower().endswith(formats):
            self._errors["media"] = self.error_class(["Not a valid file format."])
        # Django expects clean() to return the complete cleaned_data
        # mapping -- the return value replaces self.cleaned_data.
        return self.cleaned_data
class MediaTestimonialEditForm(forms.ModelForm):
    """Admin-side edit form for an existing MediaTestimonials row.

    The stored file path and creation timestamp are excluded because
    they must not be edited after upload.
    """
    class Meta:
        model = MediaTestimonials
        exclude = ['path', 'created']
        # Bootstrap-styled widgets for each editable field.
        widgets = {
            'foss' : forms.Select(attrs={'class': "form-control"}),
            'workshop_details': forms.TextInput(attrs={'class': "form-control"}),
            'content' : forms.Textarea(attrs={'class': "form-control"}),
            'user' : forms.TextInput(attrs={'class': "form-control"})
        }
        labels = {
            'user': "Name",
            'workshop_details': "Workshop Details (Workshop name, venue, Date | e.g. Spoken Workshop, IIT Bombay, 26 January 2018)",
            'content': "Short Description"
        }
class ExpressionForm(forms.ModelForm):
    """ModelForm for an induction expression of interest.

    The various ``other_*`` free-text fields (and ``borrow_laptop``)
    are optional at field level but are made mandatory in clean()
    whenever the corresponding choice is 'Other' (or 'No' for
    ``bring_laptop``).
    """

    #: fields that are optional at the field level; clean() enforces
    #: them conditionally
    _CONDITIONAL_FIELDS = (
        'other_comments',
        'other_language',
        'other_medium',
        'other_education',
        'other_specialisation',
        'other_designation',
        'borrow_laptop',
    )

    class Meta(object):
        model = InductionInterest
        fields = '__all__'
        widgets = {
            'other_comments': forms.Textarea,
            'other_medium': forms.Textarea,
            'other_education': forms.Textarea,
            'other_specialisation': forms.Textarea,
            'other_designation': forms.Textarea,
            'other_language': forms.Textarea,
            'college_address': forms.Textarea,
        }

    def __init__(self, *args, **kwargs):
        super(ExpressionForm, self).__init__(*args, **kwargs)
        for field_name in self._CONDITIONAL_FIELDS:
            self.fields[field_name].required = False

    def clean(self):
        """Require each dependent free-text field when its triggering
        choice ('other' / 'no') was selected."""
        cleaned_data = super(ExpressionForm, self).clean()

        # Choice fields may be missing from cleaned_data when their own
        # validation failed; treat a missing key as an empty choice.
        mother_tongue = cleaned_data.get('mother_tongue', '')
        medium = cleaned_data.get('medium_of_studies', '')
        education = cleaned_data.get('education', '')
        specialisation = cleaned_data.get('specialisation', '')
        designation = cleaned_data.get('designation', '')
        bring_laptop = cleaned_data.get('bring_laptop', '')

        # (choice value, trigger, dependent field, error message) --
        # checked in the same order as the original implementation.
        requirements = (
            (mother_tongue, 'other', 'other_language', 'Other mother tongue is required.'),
            (education, 'other', 'other_education', 'Other education is required.'),
            (medium, 'other', 'other_medium', 'Other medium is required.'),
            (specialisation, 'other', 'other_specialisation', 'Other specialisation is required.'),
            (designation, 'other', 'other_designation', 'Other designation is required.'),
            (bring_laptop, 'no', 'borrow_laptop', 'field is required.'),
        )
        for choice, trigger, dependent, message in requirements:
            dependent_value = cleaned_data[dependent]
            if choice.lower() == trigger and not dependent_value:
                self.add_error(dependent, message)
| gpl-3.0 |
kingvuplus/ops | lib/python/Plugins/SystemPlugins/PositionerSetup/rotor_calc.py | 103 | 3251 | import math
# Geometry constants used by the satellite look-angle formulas.
f = 1.00 / 298.257  # Earth flattening factor
r_sat = 42164.57  # distance from Earth's centre to the satellite (km)
r_eq = 6378.14  # equatorial Earth radius (km)


def calcElevation(SatLon, SiteLat, SiteLon, Height_over_ocean=0):
    """Return the observed elevation angle (degrees) from a ground site
    to a geostationary satellite.

    Longitudes/latitudes are in degrees; Height_over_ocean is the site
    altitude in the same units as r_eq. Returns -99 as a sentinel when
    the satellite is far below the horizon.
    """
    # Polynomial coefficients for the atmospheric refraction correction.
    a0 = 0.58804392
    a1 = -0.17941557
    a2 = 0.29906946E-1
    a3 = -0.25187400E-2
    a4 = 0.82622101E-4

    sin_lat = math.sin(math.radians(SiteLat))
    cos_lat = math.cos(math.radians(SiteLat))

    # Site position on the flattened Earth.
    station_radius = r_eq / (math.sqrt(1.00 - f * (2.00 - f) * sin_lat ** 2))
    ra = (station_radius + Height_over_ocean) * cos_lat
    rz = station_radius * (1.00 - f) * (1.00 - f) * sin_lat

    # Satellite vector relative to the site, in an Earth-fixed frame.
    rel_x = r_sat * math.cos(math.radians(SatLon - SiteLon)) - ra
    rel_y = r_sat * math.sin(math.radians(SatLon - SiteLon))
    rel_z = -rz

    # Components in the site's local north/zenith frame.
    north = -rel_x * sin_lat + rel_z * cos_lat
    zenith = rel_x * cos_lat + rel_z * sin_lat

    horizontal_sq = north ** 2 + rel_y ** 2
    if horizontal_sq > 0:
        geometric = math.degrees(math.atan(zenith / math.sqrt(horizontal_sq)))
    else:
        # Satellite is directly overhead.
        geometric = 90

    # Refraction correction (empirical polynomial for low elevations,
    # cot-based term above 10.2 degrees).
    x = math.fabs(geometric + 0.589)
    refraction = math.fabs(a0 + (a1 + (a2 + (a3 + a4 * x) * x) * x) * x)
    if geometric > 10.2:
        observed = geometric + 0.01617 * (math.cos(math.radians(math.fabs(geometric))) / math.sin(math.radians(math.fabs(geometric))))
    else:
        observed = geometric + refraction

    # Sentinel for "far below the horizon".
    if zenith < -3000:
        observed = -99
    return observed
def calcAzimuth(SatLon, SiteLat, SiteLon, Height_over_ocean = 0):
    """Return the azimuth angle (degrees) of a geostationary satellite at
    longitude SatLon as seen from the site (SiteLat, SiteLon)."""
    def to360(angle):
        # normalise an angle into the [0, 360) range
        return angle - math.floor(angle / 360.0) * 360
    sin_lat = math.sin(math.radians(SiteLat))
    cos_lat = math.cos(math.radians(SiteLat))
    # site position on the reference ellipsoid
    Rstation = r_eq / (math.sqrt(1 - f * (2 - f) * sin_lat **2))
    Ra = (Rstation + Height_over_ocean) * cos_lat
    Rz = Rstation * (1 - f) ** 2 * sin_lat
    # site-to-satellite vector components
    rx = r_sat * math.cos(math.radians(SatLon - SiteLon)) - Ra
    ry = r_sat * math.sin(math.radians(SatLon - SiteLon))
    rz = -Rz
    r_north = -rx * sin_lat + rz * cos_lat
    if r_north < 0:
        return 180 + math.degrees(math.atan(ry / r_north))
    elif r_north > 0:
        return to360(360 + math.degrees(math.atan(ry / r_north)))
    # degenerate case: pointing exactly along the north axis
    return 0
def calcDeclination(SiteLat, Azimuth, Elevation):
    """Return the dish declination angle in degrees.

    Uses the spherical-trigonometry identity
        sin(decl) = sin(El)*sin(lat) + cos(El)*cos(lat)*cos(Az)
    with all angles in degrees.

    BUGFIX: the original code added cos(radians(Azimuth)) instead of
    multiplying by it, which is not the declination identity and can push
    asin's argument outside [-1, 1] (e.g. SiteLat=Azimuth=Elevation=0
    yields 2.0 and a math domain error).
    """
    return math.degrees(math.asin(
        math.sin(math.radians(Elevation)) *
        math.sin(math.radians(SiteLat)) +
        math.cos(math.radians(Elevation)) *
        math.cos(math.radians(SiteLat)) *
        math.cos(math.radians(Azimuth))
    ))
def calcSatHourangle(SatLon, SiteLat, SiteLon):
    """Return the satellite hour angle (degrees) for a polar-mount rotor,
    derived from the site's azimuth and elevation to the satellite."""
    Azimuth = calcAzimuth(SatLon, SiteLat, SiteLon )
    Elevation = calcElevation(SatLon, SiteLat, SiteLon)
    # components of the pointing vector projected for the polar axis
    east = - math.cos(math.radians(Elevation)) * math.sin(math.radians(Azimuth))
    polar = math.sin(math.radians(Elevation)) * math.cos(math.radians(SiteLat)) - \
        math.cos(math.radians(Elevation)) * math.sin(math.radians(SiteLat)) * \
        math.cos(math.radians(Azimuth))
    # Works for all azimuths (northern & southern hemisphere)
    hour_angle = 180 + math.degrees(math.atan(east / polar))
    if Azimuth > 270:
        hour_angle += 180
        if hour_angle > 360:
            hour_angle = 720 - hour_angle
    if Azimuth < 90:
        hour_angle = 180 - hour_angle
    return hour_angle
| gpl-2.0 |
sosguns2002/interactive-mining | interactive-mining-3rdparty-madis/madis/src/lib/pyparsing.py | 16 | 148777 | # module pyparsing.py
#
# Copyright (c) 2003-2009 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#from __future__ import generators
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form "<salutation>, <addressee>!")::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word( alphas ) + "," + Word( alphas ) + "!"
hello = "Hello, World!"
print hello, "->", greet.parseString( hello )
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The parsed results returned from parseString() can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
"""
__version__ = "1.5.2"
__versionTime__ = "17 February 2009 19:45"
__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
# Explicit public API of the module; everything else is considered internal.
__all__ = [
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 'Upcase',
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'getTokensEndLoc', 'hexnums',
'htmlComment', 'javaStyleComment', 'keepOriginalText', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor',
]
"""
Detect if we are running version 3.X and make appropriate changes
Robert A. Clark
"""
if sys.version_info[0] > 2:
_PY3K = True
_MAX_INT = sys.maxsize
basestring = str
else:
_PY3K = False
_MAX_INT = sys.maxint
# Unicode-tolerant str() replacement. On Python 3 plain str already handles
# this, so _ustr is simply aliased to str (and unichr to chr).
if not _PY3K:
    def _ustr(obj):
        """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
           str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
           then < returns the unicode object | encodes it with the default encoding | ... >.
        """
        if isinstance(obj,unicode):
            return obj

        try:
            # If this works, then _ustr(obj) has the same behaviour as str(obj), so
            # it won't break any existing code.
            return str(obj)

        except UnicodeEncodeError:
            # The Python docs (http://docs.python.org/ref/customization.html#l2h-182)
            # state that "The return value must be a string object". However, does a
            # unicode object (being a subclass of basestring) count as a "string
            # object"?
            # If so, then return a unicode object:
            return unicode(obj)
            # Else encode it... but how? There are many choices... :)
            # Replace unprintables with escape codes?
            #return unicode(obj).encode(sys.getdefaultencoding(), 'backslashreplace_errors')
            # Replace unprintables with question marks?
            #return unicode(obj).encode(sys.getdefaultencoding(), 'replace')
            # ...
else:
    _ustr = str
    unichr = chr
# Fast character-membership helper: a dict keyed by characters on Python 2
# (which predates widespread set usage here), the builtin set on Python 3.
if not _PY3K:
    def _str2dict(strg):
        # map each character to a dummy value; only key membership is used
        return dict( [(c,0) for c in strg] )
else:
    _str2dict = set
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = '&><"\''
to_symbols = ['&'+s+';' for s in "amp gt lt quot apos".split()]
for from_,to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
class _Constants(object):
    # Bare class used as a simple attribute namespace.
    # NOTE(review): its concrete use (e.g. enum-like constant holders such as
    # opAssoc) occurs later in the module, outside this view -- confirm.
    pass
# Commonly used character sets for Word() and friends.
if not _PY3K:
    alphas = string.lowercase + string.uppercase
else:
    alphas = string.ascii_lowercase + string.ascii_uppercase
nums = string.digits
hexnums = nums + "ABCDEFabcdef"     # hexadecimal digits
alphanums = alphas + nums
_bslash = chr(92)                    # backslash, spelled via chr(92) to avoid quoting issues
# every printable, non-whitespace ASCII character
printables = "".join( [ c for c in string.printable if c not in string.whitespace ] )
class ParseBaseException(Exception):
    """Base exception class for all parsing runtime exceptions.

    Attributes:
      - loc -- character offset of the error within pstr
      - msg -- error message
      - pstr -- the string being parsed ("" when only a message was given)
      - parserElement -- the ParserElement that raised the exception
      - lineno / col (or column) / line -- synthesized lazily from loc and
        pstr via __getattr__
    """
    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__( self, pstr, loc=0, msg=None, elem=None ):
        self.loc = loc
        if msg is None:
            # single-argument form: pstr is actually the message
            self.msg = pstr
            self.pstr = ""
        else:
            self.msg = msg
            self.pstr = pstr
        self.parserElement = elem

    def __getattr__( self, aname ):
        """supported attributes by name are:
            - lineno - returns the line number of the exception text
            - col - returns the column number of the exception text
            - line - returns the line containing the exception text
        """
        if( aname == "lineno" ):
            return lineno( self.loc, self.pstr )
        elif( aname in ("col", "column") ):
            return col( self.loc, self.pstr )
        elif( aname == "line" ):
            return line( self.loc, self.pstr )
        else:
            raise AttributeError(aname)

    def __str__( self ):
        return "%s (at char %d), (line:%d, col:%d)" % \
                ( self.msg, self.loc, self.lineno, self.column )
    def __repr__( self ):
        return _ustr(self)
    def markInputline( self, markerString = ">!<" ):
        """Extracts the exception line from the input string, and marks
           the location of the exception with a special symbol.
        """
        line_str = self.line
        line_column = self.column - 1
        if markerString:
            line_str = "".join( [line_str[:line_column],
                                markerString, line_str[line_column:]])
        return line_str.strip()
    def __dir__(self):
        # BUGFIX: listed "markInputLine" (capital L), which does not match
        # the actual method name markInputline defined above.
        return "loc msg pstr parserElement lineno col line " \
               "markInputline __str__ __repr__".split()
class ParseException(ParseBaseException):
    """Exception thrown when a parse expression fails to match the input;
    supported attributes by name are:
     - lineno - returns the line number of the exception text
     - col - returns the column number of the exception text
     - line - returns the line containing the exception text
    """
    pass
class ParseFatalException(ParseBaseException):
    """User-throwable exception raised when inconsistent parse content
    is found; unlike ParseException it stops all parsing immediately
    instead of allowing alternatives to be tried."""
    pass
class ParseSyntaxException(ParseFatalException):
    """just like ParseFatalException, but thrown internally when an
    ErrorStop indicates that parsing is to stop immediately because
    an unbacktrackable syntax error has been found"""
    def __init__(self, pe):
        # clone the originating ParseException's location/message/element
        super(ParseSyntaxException, self).__init__(
                                    pe.pstr, pe.loc, pe.msg, pe.parserElement)
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
    """Exception thrown by validate() if the grammar could be improperly recursive."""
    def __init__( self, parseElementList ):
        # chain of parse elements that participate in the suspected cycle
        self.parseElementTrace = parseElementList

    def __str__( self ):
        return "RecursiveGrammarException: %s" % (self.parseElementTrace,)
class _ParseResultsWithOffset(object):
def __init__(self,p1,p2):
self.tup = (p1,p2)
def __getitem__(self,i):
return self.tup[i]
def __repr__(self):
return repr(self.tup)
def setOffset(self,i):
self.tup = (self.tup[0],i)
class ParseResults(object):
    """Structured parse results, to provide multiple means of access to the parsed data:
       - as a list (len(results))
       - by list index (results[0], results[1], etc.)
       - by attribute (results.<resultsName>)
       """
    # __slots__ keeps per-instance memory small; these objects are created
    # for every successful match. __weakref__ is needed for the parent
    # back-references below. Note: all double-underscore attributes are
    # name-mangled to _ParseResults__*.
    __slots__ = ( "__toklist", "__tokdict", "__doinit", "__name", "__parent", "__accumNames", "__weakref__" )
    def __new__(cls, toklist, name=None, asList=True, modal=True ):
        # Wrapping an existing ParseResults returns it unchanged; __doinit
        # guards __init__ so a re-used instance is not re-initialized.
        if isinstance(toklist, cls):
            return toklist
        retobj = object.__new__(cls)
        retobj.__doinit = True
        return retobj

    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__( self, toklist, name=None, asList=True, modal=True ):
        if self.__doinit:
            self.__doinit = False
            self.__name = None
            self.__parent = None
            self.__accumNames = {}
            if isinstance(toklist, list):
                # defensive copy of the caller's list
                self.__toklist = toklist[:]
            else:
                self.__toklist = [toklist]
            self.__tokdict = dict()

        if name:
            if not modal:
                # non-modal (listAllMatches) names accumulate every match
                self.__accumNames[name] = 0
            if isinstance(name,int):
                name = _ustr(name) # will always return a str, but use _ustr for consistency
            self.__name = name
            if not toklist in (None,'',[]):
                if isinstance(toklist,basestring):
                    toklist = [ toklist ]
                if asList:
                    if isinstance(toklist,ParseResults):
                        self[name] = _ParseResultsWithOffset(toklist.copy(),0)
                    else:
                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
                    # relies on name mangling: sets the child's __name
                    self[name].__name = name
                else:
                    try:
                        self[name] = toklist[0]
                    except (KeyError,TypeError,IndexError):
                        self[name] = toklist

    def __getitem__( self, i ):
        if isinstance( i, (int,slice) ):
            return self.__toklist[i]
        else:
            if i not in self.__accumNames:
                # modal name: return only the most recent match
                return self.__tokdict[i][-1][0]
            else:
                # listAllMatches name: return all matches as a ParseResults
                return ParseResults([ v[0] for v in self.__tokdict[i] ])

    def __setitem__( self, k, v ):
        if isinstance(v,_ParseResultsWithOffset):
            # named result with an explicit offset
            self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
            sub = v[0]
        elif isinstance(k,int):
            self.__toklist[k] = v
            sub = v
        else:
            self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
            sub = v
        if isinstance(sub,ParseResults):
            # weakref avoids a reference cycle between parent and child
            sub.__parent = wkref(self)

    def __delitem__( self, i ):
        if isinstance(i,(int,slice)):
            mylen = len( self.__toklist )
            del self.__toklist[i]

            # convert int to slice
            if isinstance(i, int):
                if i < 0:
                    i += mylen
                i = slice(i, i+1)
            # get removed indices
            removed = list(range(*i.indices(mylen)))
            removed.reverse()
            # fixup indices in token dictionary
            for name in self.__tokdict:
                occurrences = self.__tokdict[name]
                for j in removed:
                    for k, (value, position) in enumerate(occurrences):
                        # shift each stored offset left past removed slots
                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
        else:
            del self.__tokdict[i]

    def __contains__( self, k ):
        return k in self.__tokdict

    def __len__( self ): return len( self.__toklist )
    def __bool__(self): return len( self.__toklist ) > 0
    __nonzero__ = __bool__      # Python 2 truth-value protocol
    def __iter__( self ): return iter( self.__toklist )
    def __reversed__( self ): return iter( reversed(self.__toklist) )
    def keys( self ):
        """Returns all named result keys."""
        return self.__tokdict.keys()

    def pop( self, index=-1 ):
        """Removes and returns item at specified index (default=last).
           Will work with either numeric indices or dict-key indicies."""
        ret = self[index]
        del self[index]
        return ret

    def get(self, key, defaultValue=None):
        """Returns named result matching the given key, or if there is no
           such name, then returns the given defaultValue or None if no
           defaultValue is specified."""
        if key in self:
            return self[key]
        else:
            return defaultValue

    def insert( self, index, insStr ):
        self.__toklist.insert(index, insStr)
        # fixup indices in token dictionary
        for name in self.__tokdict:
            occurrences = self.__tokdict[name]
            for k, (value, position) in enumerate(occurrences):
                occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))

    def items( self ):
        """Returns all named result keys and values as a list of tuples."""
        return [(k,self[k]) for k in self.__tokdict]

    def values( self ):
        """Returns all named result values."""
        return [ v[-1][0] for v in self.__tokdict.values() ]

    def __getattr__( self, name ):
        # attribute-style access to named results; unknown names yield ""
        if name not in self.__slots__:
            if name in self.__tokdict:
                if name not in self.__accumNames:
                    return self.__tokdict[name][-1][0]
                else:
                    return ParseResults([ v[0] for v in self.__tokdict[name] ])
            else:
                return ""
        return None

    def __add__( self, other ):
        ret = self.copy()
        ret += other
        return ret

    def __iadd__( self, other ):
        if other.__tokdict:
            offset = len(self.__toklist)
            # re-base the other result's offsets onto this token list;
            # negative positions (meaning "end") are mapped to the join point
            addoffset = ( lambda a: (a<0 and offset) or (a+offset) )
            otheritems = other.__tokdict.items()
            otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
                                for (k,vlist) in otheritems for v in vlist]
            for k,v in otherdictitems:
                self[k] = v
                if isinstance(v[0],ParseResults):
                    v[0].__parent = wkref(self)
        self.__toklist += other.__toklist
        self.__accumNames.update( other.__accumNames )
        del other
        return self

    def __repr__( self ):
        return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )

    def __str__( self ):
        out = "["
        sep = ""
        for i in self.__toklist:
            if isinstance(i, ParseResults):
                out += sep + _ustr(i)
            else:
                out += sep + repr(i)
            sep = ", "
        out += "]"
        return out

    def _asStringList( self, sep='' ):
        # flatten to a list of strings, recursing into nested ParseResults
        out = []
        for item in self.__toklist:
            if out and sep:
                out.append(sep)
            if isinstance( item, ParseResults ):
                out += item._asStringList()
            else:
                out.append( _ustr(item) )
        return out

    def asList( self ):
        """Returns the parse results as a nested list of matching tokens, all converted to strings."""
        out = []
        for res in self.__toklist:
            if isinstance(res,ParseResults):
                out.append( res.asList() )
            else:
                out.append( res )
        return out

    def asDict( self ):
        """Returns the named parse results as dictionary."""
        return dict( self.items() )

    def copy( self ):
        """Returns a new copy of a ParseResults object."""
        ret = ParseResults( self.__toklist )
        ret.__tokdict = self.__tokdict.copy()
        ret.__parent = self.__parent
        ret.__accumNames.update( self.__accumNames )
        ret.__name = self.__name
        return ret

    def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
        """Returns the parse results as XML. Tags are created for tokens and lists that have defined results names."""
        nl = "\n"
        out = []
        # map token offset -> results name, for naming individual tokens
        namedItems = dict( [ (v[1],k) for (k,vlist) in self.__tokdict.items()
                                                            for v in vlist ] )
        nextLevelIndent = indent + " "

        # collapse out indents if formatting is not desired
        if not formatted:
            indent = ""
            nextLevelIndent = ""
            nl = ""

        selfTag = None
        if doctag is not None:
            selfTag = doctag
        else:
            if self.__name:
                selfTag = self.__name

        if not selfTag:
            if namedItemsOnly:
                return ""
            else:
                selfTag = "ITEM"

        out += [ nl, indent, "<", selfTag, ">" ]

        worklist = self.__toklist
        for i,res in enumerate(worklist):
            if isinstance(res,ParseResults):
                # nested results recurse with deeper indentation
                if i in namedItems:
                    out += [ res.asXML(namedItems[i],
                                        namedItemsOnly and doctag is None,
                                        nextLevelIndent,
                                        formatted)]
                else:
                    out += [ res.asXML(None,
                                        namedItemsOnly and doctag is None,
                                        nextLevelIndent,
                                        formatted)]
            else:
                # individual token, see if there is a name for it
                resTag = None
                if i in namedItems:
                    resTag = namedItems[i]
                if not resTag:
                    if namedItemsOnly:
                        continue
                    else:
                        resTag = "ITEM"
                xmlBodyText = _xml_escape(_ustr(res))
                out += [ nl, nextLevelIndent, "<", resTag, ">",
                                                xmlBodyText,
                                                "</", resTag, ">" ]

        out += [ nl, indent, "</", selfTag, ">" ]
        return "".join(out)

    def __lookup(self,sub):
        # reverse lookup: find the name under which `sub` is stored
        for k,vlist in self.__tokdict.items():
            for v,loc in vlist:
                if sub is v:
                    return k
        return None

    def getName(self):
        """Returns the results name for this token expression."""
        if self.__name:
            return self.__name
        elif self.__parent:
            par = self.__parent()
            if par:
                return par.__lookup(self)
            else:
                return None
        elif (len(self) == 1 and
               len(self.__tokdict) == 1 and
               self.__tokdict.values()[0][0][1] in (0,-1)):
            # NOTE(review): indexing .values()/.keys() assumes Python 2
            # lists; under Python 3 these are views and this branch fails.
            return self.__tokdict.keys()[0]
        else:
            return None

    def dump(self,indent='',depth=0):
        """Diagnostic method for listing out the contents of a ParseResults.
           Accepts an optional indent argument so that this string can be embedded
           in a nested display of other data."""
        out = []
        out.append( indent+_ustr(self.asList()) )
        # NOTE(review): items() returns a view on Python 3; .sort() assumes
        # the Python 2 list return value.
        keys = self.items()
        keys.sort()
        for k,v in keys:
            if out:
                out.append('\n')
            out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
            if isinstance(v,ParseResults):
                if v.keys():
                    #~ out.append('\n')
                    out.append( v.dump(indent,depth+1) )
                    #~ out.append('\n')
                else:
                    out.append(_ustr(v))
            else:
                out.append(_ustr(v))
        #~ out.append('\n')
        return "".join(out)

    # add support for pickle protocol
    def __getstate__(self):
        # parent weakref is dereferenced for pickling and rebuilt on load
        return ( self.__toklist,
                 ( self.__tokdict.copy(),
                   self.__parent is not None and self.__parent() or None,
                   self.__accumNames,
                   self.__name ) )

    def __setstate__(self,state):
        self.__toklist = state[0]
        self.__tokdict, \
        par, \
        inAccumNames, \
        self.__name = state[1]
        self.__accumNames = {}
        self.__accumNames.update(inAccumNames)
        if par is not None:
            self.__parent = wkref(par)
        else:
            self.__parent = None

    def __dir__(self):
        return dir(super(ParseResults,self)) + self.keys()
def col (loc,strg):
    """Returns current column within a string, counting newlines as line separators.
   The first column is number 1.

   Note: the default parsing behavior is to expand tabs in the input string
   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
   on parsing strings containing <TAB>s, and suggested methods to maintain a
   consistent view of the parsed string, the parse location, and line and column
   positions within the parsed string.
   """
    # A location sitting on a newline counts as column 1 of the next line.
    if loc < len(strg) and strg[loc] == '\n':
        return 1
    return loc - strg.rfind("\n", 0, loc)
def lineno(loc,strg):
    """Returns current line number within a string, counting newlines as line separators.
   The first line is number 1.

   Note: the default parsing behavior is to expand tabs in the input string
   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
   on parsing strings containing <TAB>s, and suggested methods to maintain a
   consistent view of the parsed string, the parse location, and line and column
   positions within the parsed string.
   """
    # one more than the number of newlines preceding loc
    return 1 + strg.count("\n", 0, loc)
def line( loc, strg ):
    """Returns the line of text containing loc within a string, counting newlines as line separators.
       """
    # slice from just past the previous newline up to the next one (or to
    # the end of the string when loc is on the last line)
    start = strg.rfind("\n", 0, loc) + 1
    end = strg.find("\n", loc)
    if end > 0:
        return strg[start:end]
    else:
        return strg[start:]
# Default debug callbacks installed by ParserElement.setDebug().
def _defaultStartDebugAction( instring, loc, expr ):
    # called before an expression is attempted
    print ("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))

def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
    # called after an expression matches successfully
    print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))

def _defaultExceptionDebugAction( instring, loc, expr, exc ):
    # called when an expression raises a parse exception
    print ("Exception raised:" + _ustr(exc))

def nullDebugAction(*args):
    """'Do-nothing' debug action, to suppress debugging output during parsing."""
    pass
class ParserElement(object):
    """Abstract base level parser element class."""
    # characters skipped between tokens by default; override globally with
    # setDefaultWhitespaceChars()
    DEFAULT_WHITE_CHARS = " \n\t\r"

    def setDefaultWhitespaceChars( chars ):
        """Overrides the default whitespace chars
        """
        ParserElement.DEFAULT_WHITE_CHARS = chars
    # declared as a plain def above, then wrapped (pre-decorator style)
    setDefaultWhitespaceChars = staticmethod(setDefaultWhitespaceChars)
    def __init__( self, savelist=False ):
        """Initialize shared parser-element state.

        savelist -- when True, results of this element are kept as a sublist
        in the returned ParseResults (see saveAsList below).
        """
        self.parseAction = list()       # parse actions run on a successful match
        self.failAction = None          # optional callback when this element fails
        #~ self.name = "<unknown>"  # don't define self.name, let subclasses try/except upcall
        self.strRepr = None             # cached string representation
        self.resultsName = None         # name under which results are stored
        self.saveAsList = savelist
        self.skipWhitespace = True      # skip leading whitespace before matching
        self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
        self.copyDefaultWhiteChars = True
        self.mayReturnEmpty = False # used when checking for left-recursion
        self.keepTabs = False           # if True, don't expand tabs in the input
        self.ignoreExprs = list()       # expressions (e.g. comments) to skip over
        self.debug = False
        self.streamlined = False
        self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
        self.errmsg = ""
        self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
        self.debugActions = ( None, None, None ) #custom debug actions
        self.re = None                  # compiled regex, for regex-backed subclasses
        self.callPreparse = True # used to avoid redundant calls to preParse
        self.callDuringTry = False      # run parse actions even during lookahead/try parses
def copy( self ):
"""Make a copy of this ParserElement. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element."""
cpy = copy.copy( self )
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
    def setName( self, name ):
        """Define name for this expression, for use in debugging.
        Returns self, so calls can be chained."""
        self.name = name
        self.errmsg = "Expected " + self.name
        # keep any pre-built exception's message in sync with the new name
        if hasattr(self,"exception"):
            self.exception.msg = self.errmsg
        return self
    def setResultsName( self, name, listAllMatches=False ):
        """Define name for referencing matching tokens as a nested attribute
           of the returned parse results.
           NOTE: this returns a *copy* of the original ParserElement object;
           this is so that the client can define a basic element, such as an
           integer, and reference it in multiple places with different names.
        """
        newself = self.copy()
        newself.resultsName = name
        # modal results report only the last match; listAllMatches keeps all
        newself.modalResults = not listAllMatches
        return newself
    def setBreak(self,breakFlag = True):
        """Method to invoke the Python pdb debugger when this element is
           about to be parsed. Set breakFlag to True to enable, False to
           disable.
        """
        if breakFlag:
            # wrap the current _parse in a closure that drops into pdb first
            _parseMethod = self._parse
            def breaker(instring, loc, doActions=True, callPreParse=True):
                import pdb
                pdb.set_trace()
                return _parseMethod( instring, loc, doActions, callPreParse )
            # remember the original so setBreak(False) can restore it
            breaker._originalParseMethod = _parseMethod
            self._parse = breaker
        else:
            # restore the unwrapped parse method, if one was saved
            if hasattr(self._parse,"_originalParseMethod"):
                self._parse = self._parse._originalParseMethod
        return self
    def _normalizeParseActionArgs( f ):
        """Internal method used to decorate parse actions that take fewer than 3 arguments,
           so that all parse actions can be called as f(s,l,t)."""
        # CO_VARARGS flag bit: the callable accepts *args, so pass everything
        STAR_ARGS = 4

        try:
            restore = None
            if isinstance(f,type):
                # f is a class: inspect its __init__ but register the class
                restore = f
                f = f.__init__
            if not _PY3K:
                codeObj = f.func_code
            else:
                # NOTE(review): on Python 3 this is normally f.__code__;
                # f.code raises AttributeError and drops to the fallback path
                # below -- confirm against upstream pyparsing.
                codeObj = f.code
            if codeObj.co_flags & STAR_ARGS:
                return f
            numargs = codeObj.co_argcount
            if not _PY3K:
                if hasattr(f,"im_self"):
                    # bound method: don't count self
                    numargs -= 1
            else:
                if hasattr(f,"__self__"):
                    numargs -= 1
            if restore:
                f = restore
        except AttributeError:
            try:
                if not _PY3K:
                    call_im_func_code = f.__call__.im_func.func_code
                else:
                    call_im_func_code = f.__code__

                # not a function, must be a callable object, get info from the
                # im_func binding of its bound __call__ method
                if call_im_func_code.co_flags & STAR_ARGS:
                    return f
                numargs = call_im_func_code.co_argcount
                if not _PY3K:
                    if hasattr(f.__call__,"im_self"):
                        numargs -= 1
                else:
                    if hasattr(f.__call__,"__self__"):
                        # NOTE(review): subtracts 0 while the Python 2 branch
                        # above subtracts 1 -- the asymmetry looks suspicious;
                        # confirm against upstream pyparsing before changing.
                        numargs -= 0
            except AttributeError:
                if not _PY3K:
                    call_func_code = f.__call__.func_code
                else:
                    call_func_code = f.__call__.__code__
                # not a bound method, get info directly from __call__ method
                if call_func_code.co_flags & STAR_ARGS:
                    return f
                numargs = call_func_code.co_argcount
                if not _PY3K:
                    if hasattr(f.__call__,"im_self"):
                        numargs -= 1
                else:
                    if hasattr(f.__call__,"__self__"):
                        numargs -= 1

        #~ print ("adding function %s with %d args" % (f.func_name,numargs))
        if numargs == 3:
            return f
        else:
            if numargs > 3:
                def tmp(s,l,t):
                    return f(f.__call__.__self__, s,l,t)
            # NOTE(review): when numargs > 3, this if/elif/else chain still
            # runs and its final else rebinds tmp to the zero-arg wrapper,
            # clobbering the definition above -- confirm intended behavior.
            if numargs == 2:
                def tmp(s,l,t):
                    return f(l,t)
            elif numargs == 1:
                def tmp(s,l,t):
                    return f(t)
            else: #~ numargs == 0:
                def tmp(s,l,t):
                    return f()
            try:
                # preserve the wrapped callable's metadata where possible
                tmp.__name__ = f.__name__
            except (AttributeError,TypeError):
                # no need for special handling if attribute doesnt exist
                pass
            try:
                tmp.__doc__ = f.__doc__
            except (AttributeError,TypeError):
                # no need for special handling if attribute doesnt exist
                pass
            try:
                tmp.__dict__.update(f.__dict__)
            except (AttributeError,TypeError):
                # no need for special handling if attribute doesnt exist
                pass
            return tmp
    _normalizeParseActionArgs = staticmethod(_normalizeParseActionArgs)
    def setParseAction( self, *fns, **kwargs ):
        """Define action to perform when successfully matching parse element definition.
           Parse action fn is a callable method with 0-3 arguments, called as fn(s,loc,toks),
           fn(loc,toks), fn(toks), or just fn(), where:
            - s   = the original string being parsed (see note below)
            - loc = the location of the matching substring
            - toks = a list of the matched tokens, packaged as a ParseResults object
           If the functions in fns modify the tokens, they can return them as the return
           value from fn, and the modified list of tokens will replace the original.
           Otherwise, fn does not need to return any value.

           Note: the default parsing behavior is to expand tabs in the input string
           before starting the parsing process.  See L{I{parseString}<parseString>} for more information
           on parsing strings containing <TAB>s, and suggested methods to maintain a
           consistent view of the parsed string, the parse location, and line and column
           positions within the parsed string.
        """
        # normalize every action so it can be invoked uniformly as fn(s,l,t);
        # this REPLACES any previously set actions (see addParseAction)
        self.parseAction = list(map(self._normalizeParseActionArgs, list(fns)))
        self.callDuringTry = ("callDuringTry" in kwargs and kwargs["callDuringTry"])
        return self
    def addParseAction( self, *fns, **kwargs ):
        """Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.
        Unlike setParseAction, this APPENDS to any existing actions."""
        self.parseAction += list(map(self._normalizeParseActionArgs, list(fns)))
        # once any action asks to run during try-parses, keep that behavior
        self.callDuringTry = self.callDuringTry or ("callDuringTry" in kwargs and kwargs["callDuringTry"])
        return self
    def setFailAction( self, fn ):
        """Define action to perform if parsing fails at this expression.
           Fail acton fn is a callable function that takes the arguments
           fn(s,loc,expr,err) where:
            - s = string being parsed
            - loc = location where expression match was attempted and failed
            - expr = the parse expression that failed
            - err = the exception thrown
           The function returns no value.  It may throw ParseFatalException
           if it is desired to stop parsing immediately."""
        self.failAction = fn
        # return self to allow call chaining
        return self
    def _skipIgnorables( self, instring, loc ):
        """Advance loc past any ignorable expressions (e.g. comments).

        Repeats until a full pass over self.ignoreExprs matches nothing,
        so consecutive/interleaved ignorables are all consumed.
        """
        exprsFound = True
        while exprsFound:
            exprsFound = False
            for e in self.ignoreExprs:
                try:
                    while 1:
                        # consume repeated occurrences of this ignorable
                        loc,dummy = e._parse( instring, loc )
                        exprsFound = True
                except ParseException:
                    # this ignorable no longer matches here; try the next one
                    pass
        return loc
def preParse( self, instring, loc ):
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
    def parseImpl( self, instring, loc, doActions=True ):
        # default match implementation: match nothing, consume nothing;
        # subclasses override this with their real matching logic
        return loc, []

    def postParse( self, instring, loc, tokenlist ):
        # hook for subclasses to transform tokens after a match; identity here
        return tokenlist
#~ @profile
def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
    """Core match driver: optionally pre-parse (skip whitespace/ignorables),
    call parseImpl, then run any registered parse actions over the matched
    tokens.  Returns (new location, ParseResults).  Not memoized - see
    _parseCache for the packrat variant."""
    debugging = ( self.debug ) #and doActions )
    if debugging or self.failAction:
        # Debug/fail-action path: same flow as the else-branch below, but
        # wrapped so debug and fail callbacks can observe attempts/failures.
        #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
        if (self.debugActions[0] ):
            self.debugActions[0]( instring, loc, self )
        if callPreParse and self.callPreparse:
            preloc = self.preParse( instring, loc )
        else:
            preloc = loc
        tokensStart = loc
        try:
            try:
                loc,tokens = self.parseImpl( instring, preloc, doActions )
            except IndexError:
                # ran off the end of the input; report as a normal parse failure
                raise ParseException( instring, len(instring), self.errmsg, self )
        except ParseBaseException, err:
            #~ print ("Exception raised:", err)
            if self.debugActions[2]:
                self.debugActions[2]( instring, tokensStart, self, err )
            if self.failAction:
                self.failAction( instring, tokensStart, self, err )
            raise
    else:
        # Fast path: no debugging and no fail action registered.
        if callPreParse and self.callPreparse:
            preloc = self.preParse( instring, loc )
        else:
            preloc = loc
        tokensStart = loc
        if self.mayIndexError or loc >= len(instring):
            try:
                loc,tokens = self.parseImpl( instring, preloc, doActions )
            except IndexError:
                raise ParseException( instring, len(instring), self.errmsg, self )
        else:
            # parseImpl cannot raise IndexError here, so skip the try/except
            loc,tokens = self.parseImpl( instring, preloc, doActions )
    tokens = self.postParse( instring, loc, tokens )
    retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
    if self.parseAction and (doActions or self.callDuringTry):
        if debugging:
            try:
                for fn in self.parseAction:
                    tokens = fn( instring, tokensStart, retTokens )
                    if tokens is not None:
                        # a parse action returned replacement tokens
                        retTokens = ParseResults( tokens,
                                                  self.resultsName,
                                                  asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                                  modal=self.modalResults )
            except ParseBaseException, err:
                #~ print "Exception raised in user parse action:", err
                if (self.debugActions[2] ):
                    self.debugActions[2]( instring, tokensStart, self, err )
                raise
        else:
            for fn in self.parseAction:
                tokens = fn( instring, tokensStart, retTokens )
                if tokens is not None:
                    retTokens = ParseResults( tokens,
                                              self.resultsName,
                                              asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                              modal=self.modalResults )
    if debugging:
        #~ print ("Matched",self,"->",retTokens.asList())
        if (self.debugActions[1] ):
            self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
    return loc, retTokens
def tryParse( self, instring, loc ):
    """Attempt a match at loc without running parse actions; returns the
    ending location on success.  A ParseFatalException raised during the
    attempt is downgraded to an ordinary ParseException."""
    try:
        return self._parse( instring, loc, doActions=False )[0]
    except ParseFatalException:
        raise ParseException( instring, loc, self.errmsg, self)
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
    """Memoizing wrapper around _parseNoCache ("packrat" parsing).
    Both successful results and parse exceptions are cached, keyed on the
    full argument tuple."""
    lookup = (self,instring,loc,callPreParse,doActions)
    if lookup in ParserElement._exprArgCache:
        value = ParserElement._exprArgCache[ lookup ]
        if isinstance(value,Exception):
            # a cached failure: re-raise the same exception instance
            raise value
        return value
    else:
        try:
            value = self._parseNoCache( instring, loc, doActions, callPreParse )
            # cache a copy of the tokens so later callers cannot mutate them
            ParserElement._exprArgCache[ lookup ] = (value[0],value[1].copy())
            return value
        except ParseBaseException, pe:
            ParserElement._exprArgCache[ lookup ] = pe
            raise
# by default parsing is NOT memoized; enablePackrat() swaps this alias
_parse = _parseNoCache
# argument cache for optimizing repeated calls when backtracking through recursive expressions
_exprArgCache = {}
def resetCache():
    # clears the shared packrat cache; called before each top-level parse
    ParserElement._exprArgCache.clear()
resetCache = staticmethod(resetCache)
# one-shot guard so enablePackrat() is idempotent
_packratEnabled = False
def enablePackrat():
    """Enables "packrat" parsing, which adds memoizing to the parsing logic.
    Repeated parse attempts at the same string location (which happens
    often in many complex grammars) can immediately return a cached value,
    instead of re-executing parsing/validating code.  Memoizing is done of
    both valid results and parsing exceptions.
    This speedup may break existing programs that use parse actions that
    have side-effects.  For this reason, packrat parsing is disabled when
    you first import pyparsing.  To activate the packrat feature, your
    program must call the class method ParserElement.enablePackrat().  If
    your program uses psyco to "compile as you go", you must call
    enablePackrat before calling psyco.full().  If you do not do this,
    Python will crash.  For best results, call enablePackrat() immediately
    after importing pyparsing.
    """
    if not ParserElement._packratEnabled:
        ParserElement._packratEnabled = True
        # redirect all parsing through the memoizing wrapper
        ParserElement._parse = ParserElement._parseCache
enablePackrat = staticmethod(enablePackrat)
def parseString( self, instring, parseAll=False ):
    """Execute the parse expression with the given string.
    This is the main interface to the client code, once the complete
    expression has been built.
    If you want the grammar to require that the entire input string be
    successfully parsed, then set parseAll to True (equivalent to ending
    the grammar with StringEnd()).
    Note: parseString implicitly calls expandtabs() on the input string,
    in order to report proper column numbers in parse actions.
    If the input string contains tabs and
    the grammar uses parse actions that use the loc argument to index into the
    string being parsed, you can ensure you have a consistent view of the input
    string by:
    - calling parseWithTabs on your grammar before calling parseString
    (see L{I{parseWithTabs}<parseWithTabs>})
    - define your parse action using the full (s,loc,toks) signature, and
    reference the input string using the parse action's s argument
    - explicitly expand the tabs in your input string before calling
    parseString
    """
    # each top-level parse starts with an empty packrat cache
    ParserElement.resetCache()
    if not self.streamlined:
        self.streamline()
        #~ self.saveAsList = True
    for e in self.ignoreExprs:
        e.streamline()
    if not self.keepTabs:
        instring = instring.expandtabs()
    try:
        loc, tokens = self._parse( instring, 0 )
        if parseAll:
            # require that only trailing whitespace/ignorables remain
            loc = self.preParse( instring, loc )
            StringEnd()._parse( instring, loc )
    except ParseBaseException, exc:
        # catch and re-raise exception from here, clears out pyparsing internal stack trace
        raise exc
    else:
        return tokens
def scanString( self, instring, maxMatches=_MAX_INT ):
    """Scan the input string for expression matches.  Each match will return the
    matching tokens, start location, and end location.  May be called with optional
    maxMatches argument, to clip scanning after 'n' matches are found.
    Note that the start and end locations are reported relative to the string
    being parsed.  See L{I{parseString}<parseString>} for more information on parsing
    strings with embedded tabs.

    This is a generator: it yields (tokens, startLoc, endLoc) tuples."""
    if not self.streamlined:
        self.streamline()
    for e in self.ignoreExprs:
        e.streamline()
    if not self.keepTabs:
        instring = _ustr(instring).expandtabs()
    instrlen = len(instring)
    loc = 0
    # bind bound-methods to locals; these are called once per scan position
    preparseFn = self.preParse
    parseFn = self._parse
    ParserElement.resetCache()
    matches = 0
    try:
        while loc <= instrlen and matches < maxMatches:
            try:
                preloc = preparseFn( instring, loc )
                nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
            except ParseException:
                # no match here; resume scanning just past the pre-parsed point
                loc = preloc+1
            else:
                matches += 1
                yield tokens, preloc, nextLoc
                loc = nextLoc
    except ParseBaseException, pe:
        raise pe
def transformString( self, instring ):
    """Extension to scanString, to modify matching text with modified tokens that may
    be returned from a parse action.  To use transformString, define a grammar and
    attach a parse action to it that modifies the returned token list.
    Invoking transformString() on a target string will then scan for matches,
    and replace the matched text patterns according to the logic in the parse
    action.  transformString() returns the resulting transformed string."""
    out = []
    lastE = 0
    # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
    # keep string locs straight between transformString and scanString
    self.keepTabs = True
    try:
        for t,s,e in self.scanString( instring ):
            # copy the unmatched text preceding this match verbatim
            out.append( instring[lastE:s] )
            if t:
                if isinstance(t,ParseResults):
                    out += t.asList()
                elif isinstance(t,list):
                    out += t
                else:
                    out.append(t)
            lastE = e
        # append whatever follows the final match
        out.append(instring[lastE:])
        return "".join(map(_ustr,out))
    except ParseBaseException, pe:
        raise pe
def searchString( self, instring, maxMatches=_MAX_INT ):
    """Another extension to scanString, simplifying the access to the tokens found
    to match the given parse expression.  May be called with optional
    maxMatches argument, to clip searching after 'n' matches are found.
    Returns all matches wrapped in a single ParseResults."""
    try:
        return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
    except ParseBaseException, pe:
        # re-raise here to trim pyparsing's internal frames from the traceback
        raise pe
def __add__(self, other):
    """Implementation of + operator - returns And"""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        return And([self, other])
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None

def __radd__(self, other):
    """Implementation of + operator when left operand is not a ParserElement"""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        return other + self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None

def __sub__(self, other):
    """Implementation of - operator, returns And with error stop"""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        return And([self, And._ErrorStop(), other])
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None

def __rsub__(self, other):
    """Implementation of - operator when left operand is not a ParserElement"""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        return other - self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __mul__(self, other):
    """Implementation of * operator, allowing an expression to be repeated.
    expr*3 is equivalent to expr + expr + expr.  May also be multiplied by
    a 2-tuple (min, max):
    - expr*(n, None) matches n or more occurrences
    - expr*(None, n) matches up to n occurrences
    - expr*(m, n) matches at least m and at most n occurrences
    Raises TypeError for unsupported multiplier types and ValueError for
    negative or all-zero repetition counts.
    """
    if isinstance(other, int):
        minElements, optElements = other, 0
    elif isinstance(other, tuple):
        # pad short tuples with None so we always have a (min, max) pair
        other = (other + (None, None))[:2]
        if other[0] is None:
            other = (0, other[1])
        if isinstance(other[0], int) and other[1] is None:
            if other[0] == 0:
                return ZeroOrMore(self)
            if other[0] == 1:
                return OneOrMore(self)
            else:
                return self * other[0] + ZeroOrMore(self)
        elif isinstance(other[0], int) and isinstance(other[1], int):
            minElements, optElements = other
            optElements -= minElements
        else:
            # BUGFIX: format args were previously passed as extra exception
            # arguments instead of being %-interpolated into the message
            raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects" % (type(other[0]), type(other[1])))
    else:
        raise TypeError("cannot multiply 'ParserElement' and '%s' objects" % type(other))
    if minElements < 0:
        raise ValueError("cannot multiply ParserElement by negative value")
    if optElements < 0:
        raise ValueError("second tuple value must be greater or equal to first tuple value")
    if minElements == optElements == 0:
        raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
    if optElements:
        def makeOptionalList(n):
            # nested Optionals allow anywhere from 0 to n additional matches
            if n > 1:
                return Optional(self + makeOptionalList(n - 1))
            else:
                return Optional(self)
        if minElements:
            if minElements == 1:
                ret = self + makeOptionalList(optElements)
            else:
                ret = And([self] * minElements) + makeOptionalList(optElements)
        else:
            ret = makeOptionalList(optElements)
    else:
        if minElements == 1:
            ret = self
        else:
            ret = And([self] * minElements)
    return ret

def __rmul__(self, other):
    """Implementation of * operator when the left operand is not a ParserElement."""
    return self.__mul__(other)
def __or__(self, other):
    """Implementation of | operator - returns MatchFirst"""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        return MatchFirst([self, other])
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None

def __ror__(self, other):
    """Implementation of | operator when left operand is not a ParserElement"""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        return other | self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None

def __xor__(self, other):
    """Implementation of ^ operator - returns Or"""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        return Or([self, other])
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None

def __rxor__(self, other):
    """Implementation of ^ operator when left operand is not a ParserElement"""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        return other ^ self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None

def __and__(self, other):
    """Implementation of & operator - returns Each"""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        return Each([self, other])
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None

def __rand__(self, other):
    """Implementation of & operator when left operand is not a ParserElement"""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        return other & self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __invert__( self ):
    """Implementation of ~ operator - returns NotAny"""
    return NotAny( self )
def __call__(self, name):
    """Shortcut for setResultsName, with listAllMatches=default::
    userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
    could be written as::
    userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
    """
    return self.setResultsName(name)
def suppress( self ):
    """Suppresses the output of this ParserElement; useful to keep punctuation from
    cluttering up returned output.
    """
    return Suppress( self )
def leaveWhitespace(self):
    """Disable the skipping of whitespace before matching this element's
    pattern.  Normally only used internally by the pyparsing module, but may
    be needed in some whitespace-sensitive grammars."""
    self.skipWhitespace = False
    return self

def setWhitespaceChars(self, chars):
    """Replace the default set of whitespace characters that are skipped
    before matching."""
    self.skipWhitespace = True
    self.whiteChars = chars
    self.copyDefaultWhiteChars = False
    return self

def parseWithTabs(self):
    """Keep <TAB> characters intact (suppress the default expandtabs) when
    parsing; must be called before parseString when the grammar contains
    elements that match <TAB> characters."""
    self.keepTabs = True
    return self
def ignore( self, other ):
    """Define expression to be ignored (e.g., comments) while doing pattern
    matching; may be called repeatedly, to define multiple comment or other
    ignorable patterns.
    """
    if isinstance( other, Suppress ):
        # already suppressed; avoid registering the same expression twice
        if other not in self.ignoreExprs:
            self.ignoreExprs.append( other )
    else:
        # wrap in Suppress so ignored text never appears in the results
        self.ignoreExprs.append( Suppress( other ) )
    return self
def setDebugActions( self, startAction, successAction, exceptionAction ):
    """Enable display of debugging messages while doing pattern matching.
    Any of the three callbacks may be None to use the corresponding default
    start/success/exception debug action."""
    self.debugActions = (startAction or _defaultStartDebugAction,
                         successAction or _defaultSuccessDebugAction,
                         exceptionAction or _defaultExceptionDebugAction)
    self.debug = True
    return self
def setDebug( self, flag=True ):
    """Enable display of debugging messages while doing pattern matching.
    Set flag to True to enable, False to disable."""
    if flag:
        self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
    else:
        self.debug = False
    return self
def __str__( self ):
    return self.name
def __repr__( self ):
    return _ustr(self)
def streamline( self ):
    # mark as streamlined and drop any cached string representation;
    # subclasses override this to flatten nested expressions
    self.streamlined = True
    self.strRepr = None
    return self
def checkRecursion( self, parseElementList ):
    # leaf elements cannot recurse; overridden by expression containers
    pass
def validate( self, validateTrace=[] ):
    """Check defined expressions for valid structure, check for infinite recursive definitions."""
    # NOTE(review): mutable default argument; it is never mutated here, but
    # subclass overrides share this signature - confirm before changing it.
    self.checkRecursion( [] )
def parseFile( self, file_or_filename, parseAll=False ):
    """Execute the parse expression on the given file or filename.
    If a filename is specified (instead of a file object),
    the entire file is opened, read, and closed before parsing.
    """
    try:
        # duck-typing: anything with a read() method is treated as a file
        file_contents = file_or_filename.read()
    except AttributeError:
        f = open(file_or_filename, "rb")
        file_contents = f.read()
        f.close()
    try:
        return self.parseString(file_contents, parseAll)
    except ParseBaseException, exc:
        # catch and re-raise exception from here, clears out pyparsing internal stack trace
        raise exc
def getException(self):
    # build the exception instance reported when this element fails to match
    return ParseException("",0,self.errmsg,self)
def __getattr__(self,aname):
    # lazily create and cache the per-element "myException" instance the
    # first time it is requested
    if aname == "myException":
        self.myException = ret = self.getException();
        return ret;
    else:
        raise AttributeError("no such attribute " + aname)
def __eq__(self,other):
    if isinstance(other, ParserElement):
        return self is other or self.__dict__ == other.__dict__
    elif isinstance(other, basestring):
        # comparing against a string means "does this expression fully match it?"
        try:
            self.parseString(_ustr(other), parseAll=True)
            return True
        except ParseBaseException:
            return False
    else:
        # NOTE(review): this compares the super() proxy object itself with
        # other (normally False); possibly intended to delegate to the
        # base-class __eq__ - confirm before changing.
        return super(ParserElement,self)==other
def __ne__(self,other):
    return not (self == other)
def __hash__(self):
    # hash on identity, consistent with the "self is other" fast path in __eq__
    return hash(id(self))
def __req__(self,other):
    return self == other
def __rne__(self,other):
    return not (self == other)
class Token(ParserElement):
    """Abstract base class for parser elements that match literal text
    patterns (as opposed to combinations of other expressions)."""

    def __init__(self):
        super(Token, self).__init__(savelist=False)

    def setName(self, name):
        ret = super(Token, self).setName(name)
        # keep the error message in sync with the (possibly adjusted) name
        self.errmsg = "Expected " + self.name
        return ret
class Empty(Token):
    """A token that consumes no input and always matches."""

    def __init__(self):
        super(Empty, self).__init__()
        self.name = "Empty"
        self.mayIndexError = False
        self.mayReturnEmpty = True
class NoMatch(Token):
    """A token that will never match."""

    def __init__(self):
        super(NoMatch, self).__init__()
        self.name = "NoMatch"
        self.mayIndexError = False
        self.mayReturnEmpty = True
        self.errmsg = "Unmatchable token"

    def parseImpl(self, instring, loc, doActions=True):
        # always fail: reuse the cached exception, updating position and text
        failure = self.myException
        failure.loc = loc
        failure.pstr = instring
        raise failure
class Literal(Token):
    """Token to exactly match a specified string."""

    def __init__(self, matchString):
        super(Literal, self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            warnings.warn("null string passed to Literal; use Empty() instead",
                          SyntaxWarning, stacklevel=2)
            # degrade gracefully: an empty literal behaves like Empty
            self.__class__ = Empty
        self.name = '"%s"' % _ustr(self.match)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.mayIndexError = False

    # Performance tuning: this routine gets called a *lot*.  For a
    # single-character match string, compare the one character and skip
    # the startswith call entirely.
    def parseImpl(self, instring, loc, doActions=True):
        if instring[loc] == self.firstMatchChar and \
           (self.matchLen == 1 or instring.startswith(self.match, loc)):
            return loc + self.matchLen, self.match
        failure = self.myException
        failure.loc = loc
        failure.pstr = instring
        raise failure

# short alias for Literal, kept for backward compatibility
_L = Literal
class Keyword(Token):
    """Token to exactly match a specified string as a keyword, that is, it must be
    immediately followed by a non-keyword character.  Compare with Literal::
    Literal("if") will match the leading 'if' in 'ifAndOnlyIf'.
    Keyword("if") will not; it will only match the leading 'if' in 'if x=1', or 'if(y==2)'
    Accepts two optional constructor arguments in addition to the keyword string:
    identChars is a string of characters that would be valid identifier characters,
    defaulting to all alphanumerics + "_" and "$"; caseless allows case-insensitive
    matching, default is False.
    """
    DEFAULT_KEYWORD_CHARS = alphanums+"_$"
    def __init__( self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False ):
        super(Keyword,self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            warnings.warn("null string passed to Keyword; use Empty() instead",
                          SyntaxWarning, stacklevel=2)
        self.name = '"%s"' % self.match
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.mayIndexError = False
        self.caseless = caseless
        if caseless:
            # normalize keyword and identifier chars to upper case so
            # matching can compare upper-cased slices of the input
            self.caselessmatch = matchString.upper()
            identChars = identChars.upper()
        self.identChars = _str2dict(identChars)
    def parseImpl( self, instring, loc, doActions=True ):
        if self.caseless:
            # match the upper-cased slice, then verify neither neighboring
            # character is an identifier character (whole-word match)
            if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
                 (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
                 (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
                return loc+self.matchLen, self.match
        else:
            # literal match plus whole-word boundary checks on both sides
            if (instring[loc] == self.firstMatchChar and
                (self.matchLen==1 or instring.startswith(self.match,loc)) and
                (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
                (loc == 0 or instring[loc-1] not in self.identChars) ):
                return loc+self.matchLen, self.match
        #~ raise ParseException( instring, loc, self.errmsg )
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
    def copy(self):
        # copies revert to the class-default keyword characters
        c = super(Keyword,self).copy()
        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
        return c
    def setDefaultKeywordChars( chars ):
        """Overrides the default Keyword chars
        """
        Keyword.DEFAULT_KEYWORD_CHARS = chars
    setDefaultKeywordChars = staticmethod(setDefaultKeywordChars)
class CaselessLiteral(Literal):
    """Token to match a specified string, ignoring case of letters.
    Note: the matched results will always be in the case of the given
    match string, NOT the case of the input text.
    """

    def __init__(self, matchString):
        # store the upper-cased text for comparisons...
        super(CaselessLiteral, self).__init__(matchString.upper())
        # ...but remember the original spelling for the returned tokens
        self.returnString = matchString
        self.name = "'%s'" % self.returnString
        self.errmsg = "Expected " + self.name

    def parseImpl(self, instring, loc, doActions=True):
        candidate = instring[loc:loc + self.matchLen].upper()
        if candidate == self.match:
            return loc + self.matchLen, self.returnString
        failure = self.myException
        failure.loc = loc
        failure.pstr = instring
        raise failure
class CaselessKeyword(Keyword):
    """Case-insensitive variant of Keyword."""

    def __init__(self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS):
        super(CaselessKeyword, self).__init__(matchString, identChars, caseless=True)

    def parseImpl(self, instring, loc, doActions=True):
        upperSlice = instring[loc:loc + self.matchLen].upper()
        # the match must not be followed by another identifier character
        followedByIdent = (loc < len(instring) - self.matchLen and
                           instring[loc + self.matchLen].upper() in self.identChars)
        if upperSlice == self.caselessmatch and not followedByIdent:
            return loc + self.matchLen, self.match
        failure = self.myException
        failure.loc = loc
        failure.pstr = instring
        raise failure
class Word(Token):
    """Token for matching words composed of allowed character sets.
    Defined with string containing all allowed initial characters,
    an optional string containing allowed body characters (if omitted,
    defaults to the initial character set), and an optional minimum,
    maximum, and/or exact length.  The default value for min is 1 (a
    minimum value < 1 is not valid); the default values for max and exact
    are 0, meaning no maximum or exact length restriction.  If asKeyword
    is True, the word must be surrounded by non-word characters.
    """
    def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False ):
        super(Word,self).__init__()
        self.initCharsOrig = initChars
        self.initChars = _str2dict(initChars)
        if bodyChars :
            self.bodyCharsOrig = bodyChars
            self.bodyChars = _str2dict(bodyChars)
        else:
            # body chars default to the initial character set
            self.bodyCharsOrig = initChars
            self.bodyChars = _str2dict(initChars)
        self.maxSpecified = max > 0
        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
        self.minLen = min
        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT
        if exact > 0:
            # exact overrides both min and max
            self.maxLen = exact
            self.minLen = exact
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.asKeyword = asKeyword
        # For the common unconstrained case, precompile an equivalent regex
        # so parseImpl can take the fast path below.
        if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
            if self.bodyCharsOrig == self.initCharsOrig:
                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
            elif len(self.bodyCharsOrig) == 1:
                self.reString = "%s[%s]*" % \
                                (re.escape(self.initCharsOrig),
                                _escapeRegexRangeChars(self.bodyCharsOrig),)
            else:
                self.reString = "[%s][%s]*" % \
                                (_escapeRegexRangeChars(self.initCharsOrig),
                                _escapeRegexRangeChars(self.bodyCharsOrig),)
            if self.asKeyword:
                self.reString = r"\b"+self.reString+r"\b"
            try:
                self.re = re.compile( self.reString )
            except:
                # NOTE(review): bare except - falls back to the character-scan
                # path whenever compilation fails, whatever the reason
                self.re = None
    def parseImpl( self, instring, loc, doActions=True ):
        if self.re:
            # fast path: a single regex match does all the work
            result = self.re.match(instring,loc)
            if not result:
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
            loc = result.end()
            return loc,result.group()
        if not(instring[ loc ] in self.initChars):
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        start = loc
        loc += 1
        instrlen = len(instring)
        bodychars = self.bodyChars
        maxloc = start + self.maxLen
        maxloc = min( maxloc, instrlen )
        while loc < maxloc and instring[loc] in bodychars:
            loc += 1
        throwException = False
        if loc - start < self.minLen:
            # matched run is shorter than the required minimum
            throwException = True
        if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
            # more body chars follow, so the word exceeds the allowed maximum
            throwException = True
        if self.asKeyword:
            # keyword mode: word must not touch body chars on either side
            if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
                throwException = True
        if throwException:
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, instring[start:loc]
    def __str__( self ):
        try:
            return super(Word,self).__str__()
        except:
            # no name set yet; fall through to the synthesized representation
            pass
        if self.strRepr is None:
            def charsAsStr(s):
                # abbreviate long character sets for display
                if len(s)>4:
                    return s[:4]+"..."
                else:
                    return s
            if ( self.initCharsOrig != self.bodyCharsOrig ):
                self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
            else:
                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
        return self.strRepr
class Regex(Token):
    """Token for matching strings that match a given regular expression.
    Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
    Named groups in the pattern become named results in the returned ParseResults.
    """
    def __init__( self, pattern, flags=0):
        """The parameters pattern and flags are passed to the re.compile() function as-is. See the Python re module for an explanation of the acceptable patterns and flags."""
        super(Regex,self).__init__()
        if len(pattern) == 0:
            warnings.warn("null string passed to Regex; use Empty() instead",
                          SyntaxWarning, stacklevel=2)
        self.pattern = pattern
        self.flags = flags
        try:
            self.re = re.compile(self.pattern, self.flags)
            self.reString = self.pattern
        except sre_constants.error:
            # warn with the offending pattern, then propagate the error
            warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
                          SyntaxWarning, stacklevel=2)
            raise
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        result = self.re.match(instring,loc)
        if not result:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        loc = result.end()
        # expose any named groups as named results
        d = result.groupdict()
        ret = ParseResults(result.group())
        if d:
            for k in d:
                ret[k] = d[k]
        return loc,ret
    def __str__( self ):
        try:
            return super(Regex,self).__str__()
        except:
            # NOTE(review): bare except - no name set yet; fall through
            pass
        if self.strRepr is None:
            self.strRepr = "Re:(%s)" % repr(self.pattern)
        return self.strRepr
class QuotedString(Token):
    """Token for matching strings that are delimited by quoting characters.
    """
    def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None):
        """
        Defined with the following parameters:
        - quoteChar - string of one or more characters defining the quote delimiting string
        - escChar - character to escape quotes, typically backslash (default=None)
        - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None)
        - multiline - boolean indicating whether quotes can span multiple lines (default=False)
        - unquoteResults - boolean indicating whether the matched text should be unquoted (default=True)
        - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=None => same as quoteChar)
        """
        super(QuotedString,self).__init__()
        # remove white space from quote chars - wont work anyway
        quoteChar = quoteChar.strip()
        if len(quoteChar) == 0:
            warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
            raise SyntaxError()
        if endQuoteChar is None:
            endQuoteChar = quoteChar
        else:
            endQuoteChar = endQuoteChar.strip()
            if len(endQuoteChar) == 0:
                warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
                raise SyntaxError()
        self.quoteChar = quoteChar
        self.quoteCharLen = len(quoteChar)
        self.firstQuoteChar = quoteChar[0]
        self.endQuoteChar = endQuoteChar
        self.endQuoteCharLen = len(endQuoteChar)
        self.escChar = escChar
        self.escQuote = escQuote
        self.unquoteResults = unquoteResults
        # Build a regex that matches the opening quote, then any run of
        # characters that cannot end the string, then the closing quote.
        if multiline:
            self.flags = re.MULTILINE | re.DOTALL
            self.pattern = r'%s(?:[^%s%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        else:
            self.flags = 0
            # single-line mode additionally excludes newlines inside the quotes
            self.pattern = r'%s(?:[^%s\n\r%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        if len(self.endQuoteChar) > 1:
            # for multi-char end quotes, allow partial prefixes of the end
            # quote as long as the next char breaks the full end sequence
            self.pattern += (
                '|(?:' + ')|(?:'.join(["%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
                                                    _escapeRegexRangeChars(self.endQuoteChar[i]))
                                       for i in range(len(self.endQuoteChar)-1,0,-1)]) + ')'
                )
        if escQuote:
            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
        if escChar:
            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
            # pattern used later to strip escape chars from the matched text
            self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
        try:
            self.re = re.compile(self.pattern, self.flags)
            self.reString = self.pattern
        except sre_constants.error:
            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
                          SyntaxWarning, stacklevel=2)
            raise
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # cheap first-character test before invoking the regex
        result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
        if not result:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        loc = result.end()
        ret = result.group()
        if self.unquoteResults:
            # strip off quotes
            ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
            if isinstance(ret,basestring):
                # replace escaped characters
                if self.escChar:
                    ret = re.sub(self.escCharReplacePattern,"\g<1>",ret)
                # replace escaped quotes
                if self.escQuote:
                    ret = ret.replace(self.escQuote, self.endQuoteChar)
        return loc, ret
    def __str__( self ):
        try:
            return super(QuotedString,self).__str__()
        except:
            # NOTE(review): bare except - no name set yet; fall through
            pass
        if self.strRepr is None:
            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
        return self.strRepr
class CharsNotIn(Token):
    """Token for matching words composed of characters *not* in a given set.
    Defined with string containing all disallowed characters, and an optional
    minimum, maximum, and/or exact length.  The default value for min is 1 (a
    minimum value < 1 is not valid); the default values for max and exact
    are 0, meaning no maximum or exact length restriction.
    """
    def __init__( self, notChars, min=1, max=0, exact=0 ):
        super(CharsNotIn,self).__init__()
        # significant characters may include whitespace, so do not skip it
        self.skipWhitespace = False
        self.notChars = notChars
        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
        self.minLen = min
        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT
        if exact > 0:
            # exact overrides both min and max
            self.maxLen = exact
            self.minLen = exact
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        # always False in practice, since the min < 1 check above keeps minLen >= 1
        self.mayReturnEmpty = ( self.minLen == 0 )
        self.mayIndexError = False
    def parseImpl( self, instring, loc, doActions=True ):
        if instring[loc] in self.notChars:
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        start = loc
        loc += 1
        notchars = self.notChars
        maxlen = min( start+self.maxLen, len(instring) )
        # consume characters until a disallowed char or the length cap
        while loc < maxlen and \
              (instring[loc] not in notchars):
            loc += 1
        if loc - start < self.minLen:
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, instring[start:loc]
    def __str__( self ):
        try:
            return super(CharsNotIn, self).__str__()
        except:
            # NOTE(review): bare except - no name set yet; fall through
            pass
        if self.strRepr is None:
            if len(self.notChars) > 4:
                self.strRepr = "!W:(%s...)" % self.notChars[:4]
            else:
                self.strRepr = "!W:(%s)" % self.notChars
        return self.strRepr
class White(Token):
    """Special matching class for matching whitespace. Normally, whitespace is ignored
    by pyparsing grammars. This class is included when some whitespace structures
    are significant. Define with a string containing the whitespace characters to be
    matched; default is " \\t\\r\\n". Also takes optional min, max, and exact arguments,
    as defined for the Word class."""
    # display names for each recognized whitespace character
    whiteStrs = {
        " " : "<SPC>",
        "\t": "<TAB>",
        "\n": "<LF>",
        "\r": "<CR>",
        "\f": "<FF>",
        }
    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
        super(White,self).__init__()
        self.matchWhite = ws
        # remove the characters to be matched from the set of skipped whitespace,
        # so this element actually gets to see them
        self.setWhitespaceChars( "".join([c for c in self.whiteChars if c not in self.matchWhite]) )
        #~ self.leaveWhitespace()
        self.name = ("".join([White.whiteStrs[c] for c in self.matchWhite]))
        self.mayReturnEmpty = True
        self.errmsg = "Expected " + self.name
        #self.myException.msg = self.errmsg
        # max/exact of 0 mean "no limit"; a nonzero exact overrides both bounds
        self.minLen = min
        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT
        if exact > 0:
            self.maxLen = exact
            self.minLen = exact
    def parseImpl( self, instring, loc, doActions=True ):
        if not(instring[ loc ] in self.matchWhite):
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        start = loc
        loc += 1
        # consume matching whitespace up to maxLen chars (clamped to end of input)
        maxloc = start + self.maxLen
        maxloc = min( maxloc, len(instring) )
        while loc < maxloc and instring[loc] in self.matchWhite:
            loc += 1
        if loc - start < self.minLen:
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, instring[start:loc]
class _PositionToken(Token):
    """Abstract base for tokens that match a *position* in the input rather
    than consuming text (LineStart, StringEnd, WordStart, etc.)."""
    def __init__( self ):
        super(_PositionToken,self).__init__()
        self.name=self.__class__.__name__
        self.mayReturnEmpty = True
        self.mayIndexError = False
class GoToColumn(_PositionToken):
    """Token to advance to a specific column of input text; useful for tabular report scraping."""
    def __init__( self, colno ):
        super(GoToColumn,self).__init__()
        self.col = colno
    def preParse( self, instring, loc ):
        # advance over whitespace (and ignorables) until the target column,
        # but stop early at end of input or at a non-space character
        if col(loc,instring) != self.col:
            instrlen = len(instring)
            if self.ignoreExprs:
                loc = self._skipIgnorables( instring, loc )
            while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
                loc += 1
        return loc
    def parseImpl( self, instring, loc, doActions=True ):
        thiscol = col( loc, instring )
        # already past the target column: cannot back up, so fail
        if thiscol > self.col:
            raise ParseException( instring, loc, "Text not in expected column", self )
        newloc = loc + self.col - thiscol
        ret = instring[ loc: newloc ]
        return newloc, ret
class LineStart(_PositionToken):
    """Matches if current position is at the beginning of a line within the parse string"""
    def __init__( self ):
        super(LineStart,self).__init__()
        # keep "\n" significant: skip all default whitespace except newlines
        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
        self.errmsg = "Expected start of line"
        #self.myException.msg = self.errmsg
    def preParse( self, instring, loc ):
        preloc = super(LineStart,self).preParse(instring,loc)
        # NOTE(review): this advances the *original* loc past a newline found at
        # preloc, rather than advancing preloc itself - confirm this is intended
        if instring[preloc] == "\n":
            loc += 1
        return loc
    def parseImpl( self, instring, loc, doActions=True ):
        # match at string start, at the first non-whitespace position, or just after a newline
        if not( loc==0 or
            (loc == self.preParse( instring, 0 )) or
            (instring[loc-1] == "\n") ): #col(loc, instring) != 1:
            #~ raise ParseException( instring, loc, "Expected start of line" )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, []
class LineEnd(_PositionToken):
    """Matches if current position is at the end of a line within the parse string"""
    def __init__( self ):
        super(LineEnd,self).__init__()
        # keep "\n" significant: skip all default whitespace except newlines
        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
        self.errmsg = "Expected end of line"
        #self.myException.msg = self.errmsg
    def parseImpl( self, instring, loc, doActions=True ):
        if loc<len(instring):
            if instring[loc] == "\n":
                # consume the newline and return it as the matched token
                return loc+1, "\n"
            else:
                #~ raise ParseException( instring, loc, "Expected end of line" )
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        elif loc == len(instring):
            # end of input counts as end of line; advance past it
            return loc+1, []
        else:
            # loc is already past the end of the input
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
class StringStart(_PositionToken):
    """Matches if current position is at the beginning of the parse string"""
    def __init__( self ):
        super(StringStart,self).__init__()
        self.errmsg = "Expected start of text"
        #self.myException.msg = self.errmsg
    def parseImpl( self, instring, loc, doActions=True ):
        if loc != 0:
            # see if entire string up to here is just whitespace and ignoreables
            if loc != self.preParse( instring, 0 ):
                #~ raise ParseException( instring, loc, "Expected start of text" )
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        return loc, []
class StringEnd(_PositionToken):
    """Matches if current position is at the end of the parse string"""
    def __init__( self ):
        super(StringEnd,self).__init__()
        self.errmsg = "Expected end of text"
        #self.myException.msg = self.errmsg
    def parseImpl( self, instring, loc, doActions=True ):
        if loc < len(instring):
            # input remains beyond loc: not at end of text
            #~ raise ParseException( instring, loc, "Expected end of text" )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        elif loc == len(instring):
            # exactly at the end; advance past it so repeated matches don't loop
            return loc+1, []
        else:
            # loc > len(instring): already past the end - match without advancing.
            # (A fourth else branch in the original was unreachable, since the
            # three comparisons <, ==, > are exhaustive; it has been removed.)
            return loc, []
class WordStart(_PositionToken):
    """Matches if the current position is at the beginning of a Word, and
    is not preceded by any character in a given set of wordChars
    (default=printables). To emulate the \\b behavior of regular expressions,
    use WordStart(alphanums). WordStart will also match at the beginning of
    the string being parsed, or at the beginning of a line.
    """
    def __init__(self, wordChars = printables):
        super(WordStart,self).__init__()
        # dict used as a set for O(1) membership tests
        self.wordChars = _str2dict(wordChars)
        self.errmsg = "Not at the start of a word"
    def parseImpl(self, instring, loc, doActions=True ):
        # loc == 0 always matches (start of string)
        if loc != 0:
            # fail if the previous char is a word char, or the current one is not
            if (instring[loc-1] in self.wordChars or
                instring[loc] not in self.wordChars):
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        return loc, []
class WordEnd(_PositionToken):
    """Matches if the current position is at the end of a Word, and
    is not followed by any character in a given set of wordChars
    (default=printables). To emulate the \\b behavior of regular expressions,
    use WordEnd(alphanums). WordEnd will also match at the end of
    the string being parsed, or at the end of a line.
    """
    def __init__(self, wordChars = printables):
        super(WordEnd,self).__init__()
        # dict used as a set for O(1) membership tests
        self.wordChars = _str2dict(wordChars)
        self.skipWhitespace = False
        self.errmsg = "Not at the end of a word"
    def parseImpl(self, instring, loc, doActions=True ):
        instrlen = len(instring)
        # end of string always matches
        if instrlen>0 and loc<instrlen:
            # fail if the current char is a word char, or the previous one is not
            if (instring[loc] in self.wordChars or
                instring[loc-1] not in self.wordChars):
                #~ raise ParseException( instring, loc, "Expected end of word" )
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        return loc, []
class ParseExpression(ParserElement):
    """Abstract subclass of ParserElement, for combining and post-processing parsed tokens."""
    def __init__( self, exprs, savelist = False ):
        super(ParseExpression,self).__init__(savelist)
        # normalize exprs to a list of ParserElements: accept a list, a bare
        # string (promoted to Literal), any iterable, or a single element
        if isinstance( exprs, list ):
            self.exprs = exprs
        elif isinstance( exprs, basestring ):
            self.exprs = [ Literal( exprs ) ]
        else:
            try:
                self.exprs = list( exprs )
            except TypeError:
                self.exprs = [ exprs ]
        self.callPreparse = False
    def __getitem__( self, i ):
        return self.exprs[i]
    def append( self, other ):
        self.exprs.append( other )
        self.strRepr = None  # invalidate cached repr
        return self
    def leaveWhitespace( self ):
        """Extends leaveWhitespace defined in base class, and also invokes leaveWhitespace on
        all contained expressions."""
        self.skipWhitespace = False
        # copy contained expressions so the caller's elements are not mutated
        self.exprs = [ e.copy() for e in self.exprs ]
        for e in self.exprs:
            e.leaveWhitespace()
        return self
    def ignore( self, other ):
        # propagate the ignore expression to all contained expressions;
        # a Suppress is only added once
        if isinstance( other, Suppress ):
            if other not in self.ignoreExprs:
                super( ParseExpression, self).ignore( other )
                for e in self.exprs:
                    e.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseExpression, self).ignore( other )
            for e in self.exprs:
                e.ignore( self.ignoreExprs[-1] )
        return self
    def __str__( self ):
        try:
            return super(ParseExpression,self).__str__()
        except:
            pass
        if self.strRepr is None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
        return self.strRepr
    def streamline( self ):
        super(ParseExpression,self).streamline()
        for e in self.exprs:
            e.streamline()
        # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
        # but only if there are no parse actions or resultsNames on the nested And's
        # (likewise for Or's and MatchFirst's)
        if ( len(self.exprs) == 2 ):
            other = self.exprs[0]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = other.exprs[:] + [ self.exprs[1] ]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError  |= other.mayIndexError
            other = self.exprs[-1]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = self.exprs[:-1] + other.exprs[:]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError  |= other.mayIndexError
        return self
    def setResultsName( self, name, listAllMatches=False ):
        ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
        return ret
    def validate( self, validateTrace=[] ):
        # recursively validate contained expressions, then check for left recursion
        tmp = validateTrace[:]+[self]
        for e in self.exprs:
            e.validate(tmp)
        self.checkRecursion( [] )
class And(ParseExpression):
    """Requires all given ParseExpressions to be found in the given order.
    Expressions may be separated by whitespace.
    May be constructed using the '+' operator.
    """
    class _ErrorStop(Empty):
        # marker element: everything after it must match or a fatal
        # ParseSyntaxException is raised instead of a backtrackable ParseException
        def __init__(self, *args, **kwargs):
            # NOTE(review): super(Empty,...) skips Empty.__init__ and goes straight
            # to its base class - confirm this is intentional
            super(Empty,self).__init__(*args, **kwargs)
            self.leaveWhitespace()
    def __init__( self, exprs, savelist = True ):
        super(And,self).__init__(exprs, savelist)
        # an And can only return empty if every contained expression can
        self.mayReturnEmpty = True
        for e in self.exprs:
            if not e.mayReturnEmpty:
                self.mayReturnEmpty = False
                break
        # whitespace handling is inherited from the first expression
        self.setWhitespaceChars( exprs[0].whiteChars )
        self.skipWhitespace = exprs[0].skipWhitespace
        self.callPreparse = True
    def parseImpl( self, instring, loc, doActions=True ):
        # pass False as last arg to _parse for first element, since we already
        # pre-parsed the string as part of our And pre-parsing
        loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
        errorStop = False
        for e in self.exprs[1:]:
            if isinstance(e, And._ErrorStop):
                errorStop = True
                continue
            if errorStop:
                # after the error stop, failures become non-backtrackable
                try:
                    loc, exprtokens = e._parse( instring, loc, doActions )
                except ParseSyntaxException:
                    raise
                except ParseBaseException, pe:
                    raise ParseSyntaxException(pe)
                except IndexError, ie:
                    raise ParseSyntaxException( ParseException(instring, len(instring), self.errmsg, self) )
            else:
                loc, exprtokens = e._parse( instring, loc, doActions )
            # accumulate tokens only if there is something to add
            if exprtokens or exprtokens.keys():
                resultlist += exprtokens
        return loc, resultlist
    def __iadd__(self, other ):
        if isinstance( other, basestring ):
            other = Literal( other )
        return self.append( other ) #And( [ self, other ] )
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
            # only the leading possibly-empty expressions can start a recursion cycle
            if not e.mayReturnEmpty:
                break
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
class Or(ParseExpression):
    """Requires that at least one ParseExpression is found.
    If two expressions match, the expression that matches the longest string will be used.
    May be constructed using the '^' operator.
    """
    def __init__( self, exprs, savelist = False ):
        super(Or,self).__init__(exprs, savelist)
        # an Or can return empty if any alternative can
        self.mayReturnEmpty = False
        for e in self.exprs:
            if e.mayReturnEmpty:
                self.mayReturnEmpty = True
                break
    def parseImpl( self, instring, loc, doActions=True ):
        maxExcLoc = -1
        maxMatchLoc = -1
        maxException = None
        # try every alternative; remember the longest match and, on total
        # failure, the exception that got the furthest into the input
        for e in self.exprs:
            try:
                loc2 = e.tryParse( instring, loc )
            except ParseException, err:
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring,len(instring),e.errmsg,self)
                    maxExcLoc = len(instring)
            else:
                if loc2 > maxMatchLoc:
                    maxMatchLoc = loc2
                    maxMatchExp = e
        if maxMatchLoc < 0:
            if maxException is not None:
                raise maxException
            else:
                raise ParseException(instring, loc, "no defined alternatives to match", self)
        # re-parse the winning alternative, this time with actions enabled
        return maxMatchExp._parse( instring, loc, doActions )
    def __ixor__(self, other ):
        if isinstance( other, basestring ):
            other = Literal( other )
        return self.append( other ) #Or( [ self, other ] )
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " ^ ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class MatchFirst(ParseExpression):
    """Requires that at least one ParseExpression is found.
    If two expressions match, the first one listed is the one that will match.
    May be constructed using the '|' operator.
    """
    def __init__( self, exprs, savelist = False ):
        super(MatchFirst,self).__init__(exprs, savelist)
        if exprs:
            # a MatchFirst can return empty if any alternative can
            self.mayReturnEmpty = False
            for e in self.exprs:
                if e.mayReturnEmpty:
                    self.mayReturnEmpty = True
                    break
        else:
            self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        maxExcLoc = -1
        maxException = None
        # return the FIRST alternative that matches; on total failure, re-raise
        # the exception that got furthest into the input
        for e in self.exprs:
            try:
                ret = e._parse( instring, loc, doActions )
                return ret
            except ParseException, err:
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring,len(instring),e.errmsg,self)
                    maxExcLoc = len(instring)
        # only got here if no expression matched, raise exception for match that made it the furthest
        else:
            if maxException is not None:
                raise maxException
            else:
                raise ParseException(instring, loc, "no defined alternatives to match", self)
    def __ior__(self, other ):
        if isinstance( other, basestring ):
            other = Literal( other )
        return self.append( other ) #MatchFirst( [ self, other ] )
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " | ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class Each(ParseExpression):
    """Requires all given ParseExpressions to be found, but in any order.
    Expressions may be separated by whitespace.
    May be constructed using the '&' operator.
    """
    def __init__( self, exprs, savelist = True ):
        super(Each,self).__init__(exprs, savelist)
        # an Each can only return empty if every contained expression can
        self.mayReturnEmpty = True
        for e in self.exprs:
            if not e.mayReturnEmpty:
                self.mayReturnEmpty = False
                break
        self.skipWhitespace = True
        self.initExprGroups = True
    def parseImpl( self, instring, loc, doActions=True ):
        # lazily partition expressions by repetition class on first parse
        if self.initExprGroups:
            self.optionals = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
            self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
            self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
            self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
            self.required += self.multirequired
            self.initExprGroups = False
        tmpLoc = loc
        tmpReqd = self.required[:]
        tmpOpt  = self.optionals[:]
        matchOrder = []
        # first pass: probe with tryParse (no actions) until a full sweep
        # produces no new matches, recording the successful match order
        keepMatching = True
        while keepMatching:
            tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
            failed = []
            for e in tmpExprs:
                try:
                    tmpLoc = e.tryParse( instring, tmpLoc )
                except ParseException:
                    failed.append(e)
                else:
                    matchOrder.append(e)
                    if e in tmpReqd:
                        tmpReqd.remove(e)
                    elif e in tmpOpt:
                        tmpOpt.remove(e)
            if len(failed) == len(tmpExprs):
                keepMatching = False
        if tmpReqd:
            missing = ", ".join( [ _ustr(e) for e in tmpReqd ] )
            raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
        # add any unmatched Optionals, in case they have default values defined
        matchOrder += list(e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt)
        # second pass: re-parse in the discovered order, with actions enabled
        resultlist = []
        for e in matchOrder:
            loc,results = e._parse(instring,loc,doActions)
            resultlist.append(results)
        # merge results, combining values for duplicate result names
        finalResults = ParseResults([])
        for r in resultlist:
            dups = {}
            for k in r.keys():
                if k in finalResults.keys():
                    tmp = ParseResults(finalResults[k])
                    tmp += ParseResults(r[k])
                    dups[k] = tmp
            finalResults += ParseResults(r)
            for k,v in dups.items():
                finalResults[k] = v
        return loc, finalResults
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " & ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class ParseElementEnhance(ParserElement):
    """Abstract subclass of ParserElement, for combining and post-processing parsed tokens."""
    def __init__( self, expr, savelist=False ):
        super(ParseElementEnhance,self).__init__(savelist)
        # promote a bare string to a Literal
        if isinstance( expr, basestring ):
            expr = Literal(expr)
        self.expr = expr
        self.strRepr = None
        if expr is not None:
            # inherit parsing characteristics from the wrapped expression
            self.mayIndexError = expr.mayIndexError
            self.mayReturnEmpty = expr.mayReturnEmpty
            self.setWhitespaceChars( expr.whiteChars )
            self.skipWhitespace = expr.skipWhitespace
            self.saveAsList = expr.saveAsList
            self.callPreparse = expr.callPreparse
            self.ignoreExprs.extend(expr.ignoreExprs)
    def parseImpl( self, instring, loc, doActions=True ):
        # delegate to the wrapped expression; fail if none was ever assigned
        if self.expr is not None:
            return self.expr._parse( instring, loc, doActions, callPreParse=False )
        else:
            raise ParseException("",loc,self.errmsg,self)
    def leaveWhitespace( self ):
        self.skipWhitespace = False
        # copy the wrapped expression so the caller's element is not mutated
        self.expr = self.expr.copy()
        if self.expr is not None:
            self.expr.leaveWhitespace()
        return self
    def ignore( self, other ):
        # propagate the ignore expression to the wrapped expression;
        # a Suppress is only added once
        if isinstance( other, Suppress ):
            if other not in self.ignoreExprs:
                super( ParseElementEnhance, self).ignore( other )
                if self.expr is not None:
                    self.expr.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseElementEnhance, self).ignore( other )
            if self.expr is not None:
                self.expr.ignore( self.ignoreExprs[-1] )
        return self
    def streamline( self ):
        super(ParseElementEnhance,self).streamline()
        if self.expr is not None:
            self.expr.streamline()
        return self
    def checkRecursion( self, parseElementList ):
        # seeing ourselves again without consuming input means left recursion
        if self in parseElementList:
            raise RecursiveGrammarException( parseElementList+[self] )
        subRecCheckList = parseElementList[:] + [ self ]
        if self.expr is not None:
            self.expr.checkRecursion( subRecCheckList )
    def validate( self, validateTrace=[] ):
        tmp = validateTrace[:]+[self]
        if self.expr is not None:
            self.expr.validate(tmp)
        self.checkRecursion( [] )
    def __str__( self ):
        try:
            return super(ParseElementEnhance,self).__str__()
        except:
            pass
        if self.strRepr is None and self.expr is not None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
        return self.strRepr
class FollowedBy(ParseElementEnhance):
    """Lookahead matching of the given parse expression. FollowedBy
    does *not* advance the parsing position within the input string, it only
    verifies that the specified parse expression matches at the current
    position. FollowedBy always returns a null token list."""
    def __init__( self, expr ):
        super(FollowedBy,self).__init__(expr)
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # tryParse raises on failure; on success the original loc is returned
        self.expr.tryParse( instring, loc )
        return loc, []
class NotAny(ParseElementEnhance):
    """Lookahead to disallow matching with the given parse expression. NotAny
    does *not* advance the parsing position within the input string, it only
    verifies that the specified parse expression does *not* match at the current
    position. Also, NotAny does *not* skip over leading whitespace. NotAny
    always returns a null token list. May be constructed using the '~' operator."""
    def __init__( self, expr ):
        super(NotAny,self).__init__(expr)
        #~ self.leaveWhitespace()
        self.skipWhitespace = False  # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
        self.mayReturnEmpty = True
        self.errmsg = "Found unwanted token, "+_ustr(self.expr)
        #self.myException = ParseException("",0,self.errmsg,self)
    def parseImpl( self, instring, loc, doActions=True ):
        # success of the wrapped expression means failure of NotAny, and vice versa
        try:
            self.expr.tryParse( instring, loc )
        except (ParseException,IndexError):
            pass
        else:
            #~ raise ParseException(instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, []
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "~{" + _ustr(self.expr) + "}"
        return self.strRepr
class ZeroOrMore(ParseElementEnhance):
    """Optional repetition of zero or more of the given expression."""
    def __init__( self, expr ):
        super(ZeroOrMore,self).__init__(expr)
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        tokens = []
        # repeat until the wrapped expression fails; zero matches is fine,
        # so the very first failure is also swallowed
        try:
            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
            hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
            while 1:
                if hasIgnoreExprs:
                    preloc = self._skipIgnorables( instring, loc )
                else:
                    preloc = loc
                loc, tmptokens = self.expr._parse( instring, preloc, doActions )
                if tmptokens or tmptokens.keys():
                    tokens += tmptokens
        except (ParseException,IndexError):
            pass
        return loc, tokens
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "[" + _ustr(self.expr) + "]..."
        return self.strRepr
    def setResultsName( self, name, listAllMatches=False ):
        ret = super(ZeroOrMore,self).setResultsName(name,listAllMatches)
        ret.saveAsList = True
        return ret
class OneOrMore(ParseElementEnhance):
    """Repetition of one or more of the given expression."""
    def parseImpl( self, instring, loc, doActions=True ):
        # must be at least one: the first parse is outside the try, so its
        # failure propagates to the caller
        loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
        try:
            hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
            while 1:
                if hasIgnoreExprs:
                    preloc = self._skipIgnorables( instring, loc )
                else:
                    preloc = loc
                loc, tmptokens = self.expr._parse( instring, preloc, doActions )
                if tmptokens or tmptokens.keys():
                    tokens += tmptokens
        except (ParseException,IndexError):
            pass
        return loc, tokens
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + _ustr(self.expr) + "}..."
        return self.strRepr
    def setResultsName( self, name, listAllMatches=False ):
        ret = super(OneOrMore,self).setResultsName(name,listAllMatches)
        ret.saveAsList = True
        return ret
class _NullToken(object):
    """Falsy placeholder object that renders as the empty string.

    Used as the default for Optional's unmatched value, so that None
    remains available as a legitimate user-supplied default.
    """
    def __str__(self):
        return ""
    def __bool__(self):
        return False
    # Python 2 truth-testing protocol uses __nonzero__
    __nonzero__ = __bool__

# module-level sentinel marking "no default supplied" for Optional
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
    """Optional matching of the given expression.
    A default return string can also be specified, if the optional expression
    is not found.
    """
    def __init__( self, exprs, default=_optionalNotMatched ):
        super(Optional,self).__init__( exprs, savelist=False )
        self.defaultValue = default
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        try:
            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
        except (ParseException,IndexError):
            # no match: substitute the default, if one was supplied
            if self.defaultValue is not _optionalNotMatched:
                if self.expr.resultsName:
                    # expose the default under the wrapped expression's results name too
                    tokens = ParseResults([ self.defaultValue ])
                    tokens[self.expr.resultsName] = self.defaultValue
                else:
                    tokens = [ self.defaultValue ]
            else:
                tokens = []
        return loc, tokens
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "[" + _ustr(self.expr) + "]"
        return self.strRepr
class SkipTo(ParseElementEnhance):
    """Token for skipping over all undefined text until the matched expression is found.
    If include is set to true, the matched expression is also parsed (the skipped text
    and matched expression are returned as a 2-element list). The ignore
    argument is used to define grammars (typically quoted strings and comments) that
    might contain false matches. The failOn argument defines an expression that,
    if found in the skipped text, aborts the skip with a ParseException.
    """
    def __init__( self, other, include=False, ignore=None, failOn=None ):
        super( SkipTo, self ).__init__( other )
        self.ignoreExpr = ignore
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.includeMatch = include
        self.asList = False
        # accept failOn as either a string literal or a ParserElement
        if failOn is not None and isinstance(failOn, basestring):
            self.failOn = Literal(failOn)
        else:
            self.failOn = failOn
        self.errmsg = "No match found for "+_ustr(self.expr)
        #self.myException = ParseException("",0,self.errmsg,self)
    def parseImpl( self, instring, loc, doActions=True ):
        startLoc = loc
        instrlen = len(instring)
        expr = self.expr
        failParse = False
        # scan forward one position at a time until the target expression matches
        while loc <= instrlen:
            try:
                if self.failOn:
                    # abort the whole skip if the failOn expression matches here
                    try:
                        self.failOn.tryParse(instring, loc)
                    except ParseBaseException:
                        pass
                    else:
                        failParse = True
                        raise ParseException(instring, loc, "Found expression " + str(self.failOn))
                    failParse = False
                if self.ignoreExpr is not None:
                    # hop over ignorable regions (quoted strings, comments, ...)
                    # so false matches inside them are not reported.
                    # (A leftover debug `print` statement was removed here - it wrote
                    # to stdout on every ignore-expression match during parsing.)
                    while 1:
                        try:
                            loc = self.ignoreExpr.tryParse(instring,loc)
                        except ParseBaseException:
                            break
                expr._parse( instring, loc, doActions=False, callPreParse=False )
                skipText = instring[startLoc:loc]
                if self.includeMatch:
                    # consume the target expression and return it with the skipped text
                    loc,mat = expr._parse(instring,loc,doActions,callPreParse=False)
                    if mat:
                        skipRes = ParseResults( skipText )
                        skipRes += mat
                        return loc, [ skipRes ]
                    else:
                        return loc, [ skipText ]
                else:
                    return loc, [ skipText ]
            except (ParseException,IndexError):
                if failParse:
                    raise
                else:
                    loc += 1
        # no match anywhere in the remaining input
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class Forward(ParseElementEnhance):
    """Forward declaration of an expression to be defined later -
    used for recursive grammars, such as algebraic infix notation.
    When the expression is known, it is assigned to the Forward variable using the '<<' operator.
    Note: take care when assigning to Forward not to overlook precedence of operators.
    Specifically, '|' has a lower precedence than '<<', so that::
    fwdExpr << a | b | c
    will actually be evaluated as::
    (fwdExpr << a) | b | c
    thereby leaving b and c out as parseable alternatives. It is recommended that you
    explicitly group the values inserted into the Forward::
    fwdExpr << (a | b | c)
    """
    def __init__( self, other=None ):
        super(Forward,self).__init__( other, savelist=False )
    def __lshift__( self, other ):
        # bind the deferred expression and copy its parsing characteristics
        if isinstance( other, basestring ):
            other = Literal(other)
        self.expr = other
        self.mayReturnEmpty = other.mayReturnEmpty
        self.strRepr = None
        self.mayIndexError = self.expr.mayIndexError
        self.mayReturnEmpty = self.expr.mayReturnEmpty
        self.setWhitespaceChars( self.expr.whiteChars )
        self.skipWhitespace = self.expr.skipWhitespace
        self.saveAsList = self.expr.saveAsList
        self.ignoreExprs.extend(self.expr.ignoreExprs)
        # NOTE(review): returns None, so '<<' assignments cannot be chained - confirm intended
        return None
    def leaveWhitespace( self ):
        self.skipWhitespace = False
        return self
    def streamline( self ):
        # guard against infinite recursion through self-referential grammars
        if not self.streamlined:
            self.streamlined = True
            if self.expr is not None:
                self.expr.streamline()
        return self
    def validate( self, validateTrace=[] ):
        # only descend if we have not already been visited on this trace
        if self not in validateTrace:
            tmp = validateTrace[:]+[self]
            if self.expr is not None:
                self.expr.validate(tmp)
        self.checkRecursion([])
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        # temporarily swap in a non-recursing class so that rendering a
        # self-referential grammar terminates (prints "..." at the cycle)
        self._revertClass = self.__class__
        self.__class__ = _ForwardNoRecurse
        try:
            if self.expr is not None:
                retString = _ustr(self.expr)
            else:
                retString = "None"
        finally:
            self.__class__ = self._revertClass
        return self.__class__.__name__ + ": " + retString
    def copy(self):
        if self.expr is not None:
            return super(Forward,self).copy()
        else:
            # still undefined: create a new Forward that resolves through self
            ret = Forward()
            ret << self
            return ret
class _ForwardNoRecurse(Forward):
    # stand-in class used by Forward.__str__ to break rendering cycles
    def __str__( self ):
        return "..."
class TokenConverter(ParseElementEnhance):
    """Abstract subclass of ParseExpression, for converting parsed results."""
    def __init__( self, expr, savelist=False ):
        super(TokenConverter,self).__init__( expr )#, savelist )
        self.saveAsList = False
class Upcase(TokenConverter):
    """Converter to upper case all matching tokens.

    Deprecated - use the upcaseTokens parse action instead."""
    def __init__(self, *args):
        super(Upcase,self).__init__(*args)
        warnings.warn("Upcase class is deprecated, use upcaseTokens parse action instead",
                      DeprecationWarning,stacklevel=2)
    def postParse( self, instring, loc, tokenlist ):
        # string.upper is the Python 2 module-level function form of str.upper
        return list(map( string.upper, tokenlist ))
class Combine(TokenConverter):
    """Converter to concatenate all matching tokens to a single string.
    By default, the matching patterns must also be contiguous in the input string;
    this can be disabled by specifying 'adjacent=False' in the constructor.
    """
    def __init__( self, expr, joinString="", adjacent=True ):
        super(Combine,self).__init__( expr )
        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
        if adjacent:
            self.leaveWhitespace()
        self.adjacent = adjacent
        self.skipWhitespace = True
        self.joinString = joinString
    def ignore( self, other ):
        # in adjacent mode, ignorables apply only at this level, not inside
        if self.adjacent:
            ParserElement.ignore(self, other)
        else:
            super( Combine, self).ignore( other )
        return self
    def postParse( self, instring, loc, tokenlist ):
        # join all matched tokens into a single string token
        retToks = tokenlist.copy()
        del retToks[:]
        retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
        if self.resultsName and len(retToks.keys())>0:
            return [ retToks ]
        else:
            return retToks
class Group(TokenConverter):
    """Converter to return the matched tokens as a list - useful for returning tokens of ZeroOrMore and OneOrMore expressions."""
    def __init__( self, expr ):
        super(Group,self).__init__( expr )
        self.saveAsList = True
    def postParse( self, instring, loc, tokenlist ):
        # wrap the whole token list in a single-element list to nest it
        return [ tokenlist ]
class Dict(TokenConverter):
    """Converter to return a repetitive expression as a list, but also as a dictionary.
    Each element can also be referenced using the first token in the expression as its key.
    Useful for tabular report scraping when the first column can be used as a item key.
    """
    def __init__( self, exprs ):
        super(Dict,self).__init__( exprs )
        self.saveAsList = True
    def postParse( self, instring, loc, tokenlist ):
        # use each sub-token's first element as its dictionary key
        for i,tok in enumerate(tokenlist):
            if len(tok) == 0:
                continue
            ikey = tok[0]
            if isinstance(ikey,int):
                # integer keys would collide with list indexing - stringify them
                ikey = _ustr(tok[0]).strip()
            if len(tok)==1:
                # key with no value
                tokenlist[ikey] = _ParseResultsWithOffset("",i)
            elif len(tok)==2 and not isinstance(tok[1],ParseResults):
                # simple key/value pair
                tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
            else:
                # key with a structured value: everything after the key
                dictvalue = tok.copy() #ParseResults(i)
                del dictvalue[0]
                if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.keys()):
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
                else:
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
        if self.resultsName:
            return [ tokenlist ]
        else:
            return tokenlist
class Suppress(TokenConverter):
    """Converter for ignoring the results of a parsed expression."""
    def postParse( self, instring, loc, tokenlist ):
        # discard all matched tokens
        return []
    def suppress( self ):
        # already suppressed - nothing more to do
        return self
class OnlyOnce(object):
    """Wrapper for parse actions, to ensure they are only called once."""
    def __init__(self, methodCall):
        self.callable = ParserElement._normalizeParseActionArgs(methodCall)
        self.called = False
    def __call__(self,s,l,t):
        # guard clause: every invocation after the first successful one fails
        if self.called:
            raise ParseException(s,l,"")
        results = self.callable(s,l,t)
        self.called = True
        return results
    def reset(self):
        # re-arm the wrapper so the action may fire one more time
        self.called = False
def traceParseAction(f):
    """Decorator for debugging parse actions.

    Logs entry (with line, location and tokens) and exit (return value or
    exception) of the wrapped parse action to stderr."""
    f = ParserElement._normalizeParseActionArgs(f)
    def z(*paArgs):
        # Python 2 attribute for a function's name
        thisFunc = f.func_name
        # the last three args are always (s, l, t); a fourth leading arg means a bound method
        s,l,t = paArgs[-3:]
        if len(paArgs)>3:
            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
        sys.stderr.write( ">>entering %s(line: '%s', %d, %s)\n" % (thisFunc,line(l,s),l,t) )
        try:
            ret = f(*paArgs)
        except Exception, exc:
            sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
            raise
        sys.stderr.write( "<<leaving %s (ret: %s)\n" % (thisFunc,ret) )
        return ret
    try:
        z.__name__ = f.__name__
    except AttributeError:
        pass
    return z
#
# global helpers
#
def delimitedList( expr, delim=",", combine=False ):
    """Helper to define a delimited list of expressions - the delimiter defaults to ','.
    By default, the list elements and delimiters can have intervening whitespace, and
    comments, but this can be overridden by passing 'combine=True' in the constructor.
    If combine is set to True, the matching tokens are returned as a single token
    string, with the delimiters included; otherwise, the matching tokens are returned
    as a list of tokens, with the delimiters suppressed.
    """
    dlName = "%s [%s %s]..." % (_ustr(expr), _ustr(delim), _ustr(expr))
    if combine:
        # Keep the delimiters and merge everything into one token string.
        listExpr = Combine(expr + ZeroOrMore(delim + expr))
    else:
        # Drop the delimiters; only the list elements appear in the results.
        listExpr = expr + ZeroOrMore(Suppress(delim) + expr)
    return listExpr.setName(dlName)
def countedArray( expr ):
    """Helper to define a counted list of expressions.
    This helper defines a pattern of the form::
        integer expr expr expr...
    where the leading integer tells how many expr expressions follow.
    The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.
    """
    arrayExpr = Forward()
    def countFieldParseAction(s,l,t):
        # At parse time, rebuild the Forward to match exactly n copies of expr
        # (or an empty match when the count is zero).
        n = int(t[0])
        arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
        # Suppress the leading count token from the results.
        return []
    return ( Word(nums).setName("arrayLen").setParseAction(countFieldParseAction, callDuringTry=True) + arrayExpr )
def _flatten(L):
if type(L) is not list: return [L]
if L == []: return L
return _flatten(L[0]) + _flatten(L[1:])
def matchPreviousLiteral(expr):
    """Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression. For example::
        first = Word(nums)
        second = matchPreviousLiteral(first)
        matchExpr = first + ":" + second
    will match "1:1", but not "1:2". Because this matches a
    previous literal, will also match the leading "1:1" in "1:10".
    If this is not desired, use matchPreviousExpr.
    Do *not* use with packrat parsing enabled.
    """
    rep = Forward()
    def copyTokenToRepeater(s,l,t):
        # At parse time, rebind ``rep`` to match literally the text that
        # ``expr`` just matched.
        if t:
            if len(t) == 1:
                rep << t[0]
            else:
                # flatten t tokens
                tflat = _flatten(t.asList())
                rep << And( [ Literal(tt) for tt in tflat ] )
        else:
            # expr produced no tokens, so the repeat matches empty too.
            rep << Empty()
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    return rep
def matchPreviousExpr(expr):
    """Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression. For example::
        first = Word(nums)
        second = matchPreviousExpr(first)
        matchExpr = first + ":" + second
    will match "1:1", but not "1:2". Because this matches by
    expressions, will *not* match the leading "1:1" in "1:10";
    the expressions are evaluated first, and then compared, so
    "1" is compared with "10".
    Do *not* use with packrat parsing enabled.
    """
    rep = Forward()
    # Reparse with a copy of the same grammar, then verify the tokens match.
    e2 = expr.copy()
    rep << e2
    def copyTokenToRepeater(s,l,t):
        matchTokens = _flatten(t.asList())
        def mustMatchTheseTokens(s,l,t):
            theseTokens = _flatten(t.asList())
            if theseTokens != matchTokens:
                # Token mismatch: fail this alternative.
                raise ParseException("",0,"")
        rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    return rep
def _escapeRegexRangeChars(s):
    """Escape characters that are special inside a regex character class."""
    # Backslash-escape the class metacharacters (\, ^, -, ]) and spell out
    # newline/tab so the result is safe inside [...] in a pattern string.
    specials = r"\^-]"
    escaped = []
    for ch in s:
        if ch in specials:
            escaped.append(_bslash + ch)
        elif ch == "\n":
            escaped.append(r"\n")
        elif ch == "\t":
            escaped.append(r"\t")
        else:
            escaped.append(ch)
    return _ustr("".join(escaped))
def oneOf( strs, caseless=False, useRegex=True ):
    """Helper to quickly define a set of alternative Literals, and makes sure to do
    longest-first testing when there is a conflict, regardless of the input order,
    but returns a MatchFirst for best performance.

    Parameters:
     - strs - a string of space-delimited literals, or a list of string literals
     - caseless - (default=False) - treat all literals as caseless
     - useRegex - (default=True) - as an optimization, will generate a Regex
       object; otherwise, will generate a MatchFirst object (if caseless=True, or
       if creating a Regex raises an exception)
    """
    if caseless:
        isequal = ( lambda a,b: a.upper() == b.upper() )
        masks = ( lambda a,b: b.upper().startswith(a.upper()) )
        parseElementClass = CaselessLiteral
    else:
        isequal = ( lambda a,b: a == b )
        masks = ( lambda a,b: b.startswith(a) )
        parseElementClass = Literal

    if isinstance(strs,(list,tuple)):
        symbols = list(strs[:])
    elif isinstance(strs,basestring):
        symbols = strs.split()
    else:
        warnings.warn("Invalid argument to oneOf, expected string or list",
                SyntaxWarning, stacklevel=2)
        # BUGFIX: symbols was previously left undefined on this path, so the
        # loop below raised NameError right after the warning; fall back to
        # an empty alternative set instead.
        symbols = []

    # Reorder the symbols so that no earlier symbol masks (is a prefix of)
    # a later one, and drop duplicates - this gives longest-first matching.
    i = 0
    while i < len(symbols)-1:
        cur = symbols[i]
        for j,other in enumerate(symbols[i+1:]):
            if ( isequal(other, cur) ):
                del symbols[i+j+1]
                break
            elif ( masks(cur, other) ):
                # Move the longer (masked) symbol ahead of its prefix.
                del symbols[i+j+1]
                symbols.insert(i,other)
                cur = other
                break
        else:
            i += 1

    if not caseless and useRegex:
        #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
        try:
            if len(symbols)==len("".join(symbols)):
                # All single characters: a character class is most compact.
                return Regex( "[%s]" % "".join( [ _escapeRegexRangeChars(sym) for sym in symbols] ) )
            else:
                return Regex( "|".join( [ re.escape(sym) for sym in symbols] ) )
        except Exception:
            # BUGFIX: was a bare ``except:``, which also trapped SystemExit and
            # KeyboardInterrupt; keep the MatchFirst fallback behavior.
            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
                    SyntaxWarning, stacklevel=2)

    # last resort, just use MatchFirst
    return MatchFirst( [ parseElementClass(sym) for sym in symbols ] )
def dictOf( key, value ):
    """Helper to easily and clearly define a dictionary by specifying the respective patterns
    for the key and value. Takes care of defining the Dict, ZeroOrMore, and Group tokens
    in the proper order. The key pattern can include delimiting markers or punctuation,
    as long as they are suppressed, thereby leaving the significant key text. The value
    pattern can include named results, so that the Dict results can include named token
    fields.
    """
    # Each Group pairs one key with its value; Dict then keys the overall
    # results by the first token of every group.
    entry = Group(key + value)
    return Dict(ZeroOrMore(entry))
def originalTextFor(expr, asString=True):
    """Helper to return the original, untokenized text for a given expression. Useful to
    restore the parsed fields of an HTML start tag into the raw tag text itself, or to
    revert separate tokens with intervening whitespace back to the original matching
    input text. Simpler to use than the parse action keepOriginalText, and does not
    require the inspect module to chase up the call stack. By default, returns a
    string containing the original parsed text.

    If the optional asString argument is passed as False, then the return value is a
    ParseResults containing any results names that were originally matched, and a
    single token containing the original matched text from the input string. So if
    the expression passed to originalTextFor contains expressions with defined
    results names, you must set asString to False if you want to preserve those
    results name values."""
    # Record the input locations immediately before and after expr matches.
    locMarker = Empty().setParseAction(lambda s,loc,t: loc)
    matchExpr = locMarker("_original_start") + expr + locMarker("_original_end")
    if asString:
        # Replace all results with the raw slice of the input string.
        extractText = lambda s,l,t: s[t._original_start:t._original_end]
    else:
        def extractText(s,l,t):
            # Keep a ParseResults (preserving results names), but with the raw
            # matched text as its single token; drop the bookkeeping markers.
            del t[:]
            t.insert(0, s[t._original_start:t._original_end])
            del t["_original_start"]
            del t["_original_end"]
    matchExpr.setParseAction(extractText)
    return matchExpr
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")

# Building blocks for srange(): escaped punctuation, "\0x.." hex escapes,
# "\0.." octal escapes, single characters, and "a-z" ranges inside [].
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
_printables_less_backslash = "".join([ c for c in printables if c not in r"\]" ])
_escapedHexChar = Combine( Suppress(_bslash + "0x") + Word(hexnums) ).setParseAction(lambda s,l,t:unichr(int(t[0],16)))
_escapedOctChar = Combine( Suppress(_bslash) + Word("0","01234567") ).setParseAction(lambda s,l,t:unichr(int(t[0],8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(_printables_less_backslash,exact=1)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
# A full bracket expression: "[", optional "^" negation, body, "]".
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
# Expand a (start, end) range pair into the run of characters it denotes;
# single characters pass through unchanged.
_expanded = lambda p: (isinstance(p,ParseResults) and ''.join([ unichr(c) for c in range(ord(p[0]),ord(p[1])+1) ]) or p)
def srange(s):
    r"""Helper to easily define string ranges for use in Word construction. Borrows
    syntax from regexp '[]' string range definitions::
        srange("[0-9]") -> "0123456789"
        srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
    The input string must be enclosed in []'s, and the returned string is the expanded
    character set joined into a single string.
    The values enclosed in the []'s may be::
        a single character
        an escaped character with a leading backslash (such as \- or \])
        an escaped hex character with a leading '\0x' (\0x21, which is a '!' character)
        an escaped octal character with a leading '\0' (\041, which is a '!' character)
        a range of any of the above, separated by a dash ('a-z', etc.)
        any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.)
    """
    try:
        return "".join([_expanded(part) for part in _reBracketExpr.parseString(s).body])
    except Exception:
        # BUGFIX: was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; any parse failure still yields an empty string.
        return ""
def matchOnlyAtCol(n):
    """Helper method for defining parse actions that require matching at a specific
    column in the input text.
    """
    def verifyCol(strg, locn, toks):
        # col() is 1-based; reject the match unless it begins at column n.
        actual = col(locn, strg)
        if actual != n:
            raise ParseException(strg, locn, "matched token not at column %d" % n)
    return verifyCol
def replaceWith(replStr):
    """Helper method for common parse actions that simply return a literal value.
    Especially useful when used with transformString().
    """
    # Ignore the (s, l, t) parse-action arguments entirely; always hand back
    # the fixed replacement wrapped in a one-element token list.
    return lambda *args: [replStr]
def removeQuotes(s,l,t):
    """Helper parse action for removing quotation marks from parsed quoted strings.
    To use, add this parse action to quoted string using::
        quotedString.setParseAction( removeQuotes )
    """
    # Strip exactly one character from each end (the opening/closing quote).
    quoted = t[0]
    return quoted[1:-1]
def upcaseTokens(s,l,t):
    """Helper parse action to convert tokens to upper case."""
    # Normalize each token to a string first, then upper-case it.
    return [ _ustr(tok).upper() for tok in t ]
def downcaseTokens(s,l,t):
    """Helper parse action to convert tokens to lower case."""
    # Normalize each token to a string first, then lower-case it.
    return [ _ustr(tok).lower() for tok in t ]
def keepOriginalText(s,startLoc,t):
    """Helper parse action to preserve original parsed text,
    overriding any nested parse actions."""
    try:
        endloc = getTokensEndLoc()
    except ParseException:
        raise ParseFatalException("incorrect usage of keepOriginalText - may only be called as a parse action")
    # Replace whatever tokens were produced with the raw input slice.
    del t[:]
    t += ParseResults(s[startLoc:endloc])
    return t
def getTokensEndLoc():
    """Method to be called from within a parse action to determine the end
    location of the parsed tokens."""
    import inspect
    fstack = inspect.stack()
    try:
        # search up the stack (through intervening argument normalizers) for correct calling routine
        for f in fstack[2:]:
            if f[3] == "_parseNoCache":
                # Found the parser frame; its local 'loc' is the end location.
                endloc = f[0].f_locals["loc"]
                return endloc
        else:
            # for/else: the loop finished without finding the parser frame.
            raise ParseFatalException("incorrect usage of getTokensEndLoc - may only be called from within a parse action")
    finally:
        # Break the reference cycle created by inspect.stack().
        del fstack
def _makeTags(tagStr, xml):
    """Internal helper to construct opening and closing tag expressions, given a tag name"""
    if isinstance(tagStr,basestring):
        resname = tagStr
        # XML tag names are case-sensitive; HTML tag names are not.
        tagStr = Keyword(tagStr, caseless=not xml)
    else:
        resname = tagStr.name
    tagAttrName = Word(alphas,alphanums+"_-:")
    if (xml):
        # XML: attribute values must be double-quoted.
        tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
        openTag = Suppress("<") + tagStr + \
                Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    else:
        # HTML: values may be quoted or bare, attribute names are lowercased,
        # and the "=value" part is optional.
        printablesLessRAbrack = "".join( [ c for c in printables if c not in ">" ] )
        tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
        openTag = Suppress("<") + tagStr + \
                Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
                Optional( Suppress("=") + tagAttrValue ) ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    closeTag = Combine(_L("</") + tagStr + ">")
    # Results names become e.g. "startTagName"/"endTagName" (camel-cased).
    openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % tagStr)
    closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % tagStr)
    return openTag, closeTag
def makeHTMLTags(tagStr):
    """Helper to construct opening and closing tag expressions for HTML, given a tag name"""
    # HTML mode: caseless tag names, optional and possibly-unquoted attribute values.
    return _makeTags(tagStr, xml=False)
def makeXMLTags(tagStr):
    """Helper to construct opening and closing tag expressions for XML, given a tag name"""
    # XML mode: case-sensitive tag names, mandatory double-quoted attribute values.
    return _makeTags(tagStr, xml=True)
def withAttribute(*args,**attrDict):
    """Helper to create a validating parse action to be used with start tags created
    with makeXMLTags or makeHTMLTags. Use withAttribute to qualify a starting tag
    with a required attribute value, to avoid false matches on common tags such as
    <TD> or <DIV>.

    Call withAttribute with a series of attribute names and values. Specify the list
    of filter attributes names and values as:
     - keyword arguments, as in (class="Customer",align="right"), or
     - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
    For attribute names with a namespace prefix, you must use the second form. Attribute
    names are matched insensitive to upper/lower case.

    To verify that the attribute exists, but without specifying a value, pass
    withAttribute.ANY_VALUE as the value.
    """
    if args:
        # positional form: a sequence of (name, value) tuples
        attrs = args[:]
    else:
        # keyword form
        attrs = attrDict.items()
    attrs = [(k,v) for k,v in attrs]
    def pa(s,l,tokens):
        for attrName,attrValue in attrs:
            if attrName not in tokens:
                raise ParseException(s,l,"no matching attribute " + attrName)
            # ANY_VALUE sentinel means "attribute must exist, value is free".
            if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
                raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
                                        (attrName, tokens[attrName], attrValue))
    return pa
# Sentinel object: match any attribute value (existence check only).
withAttribute.ANY_VALUE = object()
# Associativity constants for operatorPrecedence (identity-compared sentinels).
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def operatorPrecedence( baseExpr, opList ):
    """Helper method for constructing grammars of expressions made up of
    operators working in a precedence hierarchy. Operators may be unary or
    binary, left- or right-associative. Parse actions can also be attached
    to operator expressions.

    Parameters:
     - baseExpr - expression representing the most basic element for the nested
     - opList - list of tuples, one for each operator precedence level in the
       expression grammar; each tuple is of the form
       (opExpr, numTerms, rightLeftAssoc, parseAction), where:
        - opExpr is the pyparsing expression for the operator;
          may also be a string, which will be converted to a Literal;
          if numTerms is 3, opExpr is a tuple of two expressions, for the
          two operators separating the 3 terms
        - numTerms is the number of terms for this operator (must
          be 1, 2, or 3)
        - rightLeftAssoc is the indicator whether the operator is
          right or left associative, using the pyparsing-defined
          constants opAssoc.RIGHT and opAssoc.LEFT.
        - parseAction is the parse action to be associated with
          expressions matching this operator expression (the
          parse action tuple member may be omitted)
    """
    ret = Forward()
    # Lowest level: the base element, or a parenthesized full expression.
    lastExpr = baseExpr | ( Suppress('(') + ret + Suppress(')') )
    # Build one grammar layer per precedence level, from tightest to loosest.
    for i,operDef in enumerate(opList):
        # Pad the tuple so a missing parse action becomes None.
        opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
        if arity == 3:
            if opExpr is None or len(opExpr) != 2:
                raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
            opExpr1, opExpr2 = opExpr
        thisExpr = Forward()#.setName("expr%d" % i)
        if rightLeftAssoc == opAssoc.LEFT:
            # Left-associative: operand (op operand)+ at the same level.
            if arity == 1:
                matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
                else:
                    # No explicit operator: juxtaposition (e.g. implicit multiplication).
                    matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
                            Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        elif rightLeftAssoc == opAssoc.RIGHT:
            if arity == 1:
                # try to avoid LR with this extra test
                if not isinstance(opExpr, Optional):
                    opExpr = Optional(opExpr)
                matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
            elif arity == 2:
                # Right-associative: recurse into the same level on the right side.
                if opExpr is not None:
                    matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
                else:
                    matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
                            Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        else:
            raise ValueError("operator must indicate right or left associativity")
        if pa:
            matchExpr.setParseAction( pa )
        # This level matches its operator form, or falls through to the tighter level.
        thisExpr << ( matchExpr | lastExpr )
        lastExpr = thisExpr
    ret << lastExpr
    return ret
# Predefined quoted-string expressions, implemented as single regexes for speed.
# Each allows doubled-quote and backslash escapes (including \xNN hex escapes).
dblQuotedString = Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*"').setName("string enclosed in double quotes")
sglQuotedString = Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*'").setName("string enclosed in single quotes")
quotedString = Regex(r'''(?:"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*")|(?:'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*')''').setName("quotedString using single or double quotes")
# A quoted string with a leading 'u' prefix, as in u"...".
unicodeString = Combine(_L('u') + quotedString.copy())
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString):
    """Helper method for defining nested lists enclosed in opening and closing
    delimiters ("(" and ")" are the default).

    Parameters:
     - opener - opening character for a nested list (default="("); can also be a pyparsing expression
     - closer - closing character for a nested list (default=")"); can also be a pyparsing expression
     - content - expression for items within the nested lists (default=None)
     - ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString)

    If an expression is not provided for the content argument, the nested
    expression will capture all whitespace-delimited content between delimiters
    as a list of separate values.

    Use the ignoreExpr argument to define expressions that may contain
    opening or closing characters that should not be treated as opening
    or closing characters for nesting, such as quotedString or a comment
    expression. Specify multiple expressions using an Or or MatchFirst.
    The default is quotedString, but if no expressions are to be ignored,
    then pass None for this argument.
    """
    if opener == closer:
        raise ValueError("opening and closing strings cannot be the same")
    if content is None:
        # Synthesize a default content expression: runs of non-whitespace
        # characters that are not delimiters (and not inside ignoreExpr).
        if isinstance(opener,basestring) and isinstance(closer,basestring):
            if len(opener) == 1 and len(closer)==1:
                # Single-character delimiters can be excluded via CharsNotIn.
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr +
                                    CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
                else:
                    content = (empty+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
                                ).setParseAction(lambda t:t[0].strip()))
            else:
                # Multi-character delimiters need explicit negative lookahead.
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr +
                                    ~Literal(opener) + ~Literal(closer) +
                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
                else:
                    content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
        else:
            raise ValueError("opening and closing arguments must be strings if no content expression is given")
    # Recursive structure: a group of (ignored | nested | content) between delimiters.
    ret = Forward()
    if ignoreExpr is not None:
        ret << Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
    else:
        ret << Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
    return ret
def indentedBlock(blockStatementExpr, indentStack, indent=True):
    """Helper method for defining space-delimited indentation blocks, such as
    those used to define block statements in Python source code.

    Parameters:
     - blockStatementExpr - expression defining syntax of statement that
       is repeated within the indented block
     - indentStack - list created by caller to manage indentation stack
       (multiple statementWithIndentedBlock expressions within a single grammar
       should share a common indentStack)
     - indent - boolean indicating whether block must be indented beyond the
       the current level; set to False for block of left-most statements
       (default=True)

    A valid block must contain at least one blockStatement.
    """
    def checkPeerIndent(s,l,t):
        # At end-of-input there is nothing to check.
        if l >= len(s): return
        curCol = col(l,s)
        if curCol != indentStack[-1]:
            if curCol > indentStack[-1]:
                # Deeper than the current block without a new sub-block: fatal.
                raise ParseFatalException(s,l,"illegal nesting")
            raise ParseException(s,l,"not a peer entry")
    def checkSubIndent(s,l,t):
        curCol = col(l,s)
        if curCol > indentStack[-1]:
            # Entering a deeper block: push the new indent level.
            indentStack.append( curCol )
        else:
            raise ParseException(s,l,"not a subentry")
    def checkUnindent(s,l,t):
        if l >= len(s): return
        curCol = col(l,s)
        # Must step back to (at most) the enclosing indent level.
        if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
            raise ParseException(s,l,"not an unindent")
        indentStack.pop()
    # Zero-width markers that drive the indent stack via parse actions.
    NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
    INDENT = Empty() + Empty().setParseAction(checkSubIndent)
    PEER = Empty().setParseAction(checkPeerIndent)
    UNDENT = Empty().setParseAction(checkUnindent)
    if indent:
        smExpr = Group( Optional(NL) +
            FollowedBy(blockStatementExpr) +
            INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
    else:
        smExpr = Group( Optional(NL) +
            (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
    # Allow backslash line-continuations inside statements.
    blockStatementExpr.ignore(_bslash + LineEnd())
    return smExpr
# Latin-1 letter and punctuation ranges (8-bit character classes).
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")

# Expressions for matching any HTML tag and the common named HTML entities.
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:"))
commonHTMLEntity = Combine(_L("&") + oneOf("gt lt amp nbsp quot").setResultsName("entity") +";").streamline()
_htmlEntityMap = dict(zip("gt lt amp nbsp quot".split(),'><& "'))
# Parse action: map a matched entity name to its literal character.
replaceHTMLEntity = lambda t : t.entity in _htmlEntityMap and _htmlEntityMap[t.entity] or None

# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Regex(r"/\*(?:[^*]*\*+)+?/").setName("C style comment")
htmlComment = Regex(r"<!--[\s\S]*?-->")
restOfLine = Regex(r".*").leaveWhitespace()
dblSlashComment = Regex(r"\/\/(\\\n|.)*").setName("// comment")
cppStyleComment = Regex(r"/(?:\*(?:[^*]*\*+)+?/|/[^\n]*(?:\n[^\n]*)*?(?:(?<!\\)|\Z))").setName("C++ style comment")
javaStyleComment = cppStyleComment
pythonStyleComment = Regex(r"#.*").setName("Python style comment")

# Comma-separated list where items may contain embedded (non-trailing) whitespace.
_noncomma = "".join( [ c for c in printables if c != "," ] )
_commasepitem = Combine(OneOrMore(Word(_noncomma) +
                    Optional( Word(" \t") +
                    ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString | _commasepitem, default="") ).setName("commaSeparatedList")
if __name__ == "__main__":
    # Self-test: a miniature SQL SELECT grammar exercised against good and
    # bad inputs. (Python 2 syntax: "except ParseBaseException,err".)
    def test( teststring ):
        try:
            tokens = simpleSQL.parseString( teststring )
            tokenlist = tokens.asList()
            print (teststring + "->" + str(tokenlist))
            print ("tokens = " + str(tokens))
            print ("tokens.columns = " + str(tokens.columns))
            print ("tokens.tables = " + str(tokens.tables))
            print (tokens.asXML("SQL",True))
        except ParseBaseException,err:
            # Show the failing line with a caret under the error column.
            print (teststring + "->")
            print (err.line)
            print (" "*(err.column-1) + "^")
            print (err)
        print()

    selectToken = CaselessLiteral( "select" )
    fromToken = CaselessLiteral( "from" )

    ident = Word( alphas, alphanums + "_$" )
    columnName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
    columnNameList = Group( delimitedList( columnName ) )#.setName("columns")
    tableName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
    tableNameList = Group( delimitedList( tableName ) )#.setName("tables")
    simpleSQL = ( selectToken + \
                  ( '*' | columnNameList ).setResultsName( "columns" ) + \
                  fromToken + \
                  tableNameList.setResultsName( "tables" ) )

    # Valid statements, then deliberately malformed ones to exercise error output.
    test( "SELECT * from XYZZY, ABC" )
    test( "select * from SYS.XYZZY" )
    test( "Select A from Sys.dual" )
    test( "Select AA,BB,CC from Sys.dual" )
    test( "Select A, B, C from Sys.dual" )
    test( "Select A, B, C from Sys.dual" )
    test( "Xelect A, B, C from Sys.dual" )
    test( "Select A, B, C frox Sys.dual" )
    test( "Select" )
    test( "Select ^^^ frox Sys.dual" )
    test( "Select A, B, C from Sys.dual, Table2 " )
| gpl-3.0 |
t-abe/chainer | cupy/statistics/order.py | 5 | 1527 | def amin(a, axis=None, out=None, keepdims=False, dtype=None):
    """Returns the minimum of an array or the minimum along an axis.

    Args:
        a (cupy.ndarray): Array to take the minimum.
        axis (int): Along which axis to take the minimum. The flattened array
            is used by default.
        out (cupy.ndarray): Output array.
        keepdims (bool): If True, the axis is remained as an axis of size one.
        dtype: Data type specifier.

    Returns:
        cupy.ndarray: The minimum of ``a``, along the axis if specified.

    .. seealso:: :func:`numpy.amin`

    """
    # TODO(okuta): check type
    # Delegate to ndarray.min, which implements the reduction natively.
    return a.min(axis=axis, dtype=dtype, out=out, keepdims=keepdims)
def amax(a, axis=None, out=None, keepdims=False, dtype=None):
    """Returns the maximum of an array or the maximum along an axis.

    Args:
        a (cupy.ndarray): Array to take the maximum.
        axis (int): Along which axis to take the maximum. The flattened array
            is used by default.
        out (cupy.ndarray): Output array.
        keepdims (bool): If True, the axis is remained as an axis of size one.
        dtype: Data type specifier.

    Returns:
        cupy.ndarray: The maximum of ``a``, along the axis if specified.

    .. seealso:: :func:`numpy.amax`

    """
    # TODO(okuta): check type
    # Thin wrapper: forward every keyword to the ndarray method, which
    # implements the actual reduction.
    return a.max(axis=axis, dtype=dtype, out=out, keepdims=keepdims)
# TODO(okuta): Implement nanmin
# TODO(okuta): Implement nanmax
# TODO(okuta): Implement ptp
# TODO(okuta): Implement percentile
| mit |
jayceyxc/hue | desktop/core/ext-py/markdown/markdown/extensions/meta.py | 131 | 2607 | #!usr/bin/python
"""
Meta Data Extension for Python-Markdown
=======================================
This extension adds Meta Data handling to markdown.
Basic Usage:
>>> import markdown
>>> text = '''Title: A Test Doc.
... Author: Waylan Limberg
... John Doe
... Blank_Data:
...
... The body. This is paragraph one.
... '''
>>> md = markdown.Markdown(['meta'])
>>> md.convert(text)
u'<p>The body. This is paragraph one.</p>'
>>> md.Meta
{u'blank_data': [u''], u'author': [u'Waylan Limberg', u'John Doe'], u'title': [u'A Test Doc.']}
Make sure text without Meta Data still works (markdown < 1.6b returns a <p>).
>>> text = ' Some Code - not extra lines of meta data.'
>>> md = markdown.Markdown(['meta'])
>>> md.convert(text)
u'<pre><code>Some Code - not extra lines of meta data.\\n</code></pre>'
>>> md.Meta
{}
Copyright 2007-2008 [Waylan Limberg](http://achinghead.com).
Project website: <http://www.freewisdom.org/project/python-markdown/Meta-Data>
Contact: markdown@freewisdom.org
License: BSD (see ../docs/LICENSE for details)
"""
import markdown, re
# Global Vars
# A meta-data line: "Key: value" with at most 3 leading spaces; keys are
# word characters plus "-" and "_".
META_RE = re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)')
# A continuation line for the previous key: 4+ spaces of indentation.
META_MORE_RE = re.compile(r'^[ ]{4,}(?P<value>.*)')
class MetaExtension (markdown.Extension):
    """ Meta-Data extension for Python-Markdown. """

    def extendMarkdown(self, md, md_globals):
        """ Add MetaPreprocessor to Markdown instance. """
        # Register the meta-data preprocessor at the very beginning so the
        # header block is consumed before any other processing runs.
        preprocessor = MetaPreprocessor(md)
        md.preprocessors.add("meta", preprocessor, "_begin")
class MetaPreprocessor(markdown.preprocessors.Preprocessor):
    """ Get Meta-Data.

    Consumes leading "Key: value" lines (and indented continuation lines)
    from the document, storing them in ``Markdown.Meta`` as a dict mapping
    lowercased keys to lists of values.
    """

    def run(self, lines):
        """ Parse Meta-Data and store in Markdown.Meta. """
        meta = {}
        key = None
        # BUGFIX: was ``while 1`` - a document consisting entirely of
        # meta-data lines (no trailing blank line) popped from an empty
        # list and raised IndexError. Guard on the list instead.
        while lines:
            line = lines.pop(0)
            if line.strip() == '':
                break  # blank line - done
            m1 = META_RE.match(line)
            if m1:
                key = m1.group('key').lower().strip()
                meta[key] = [m1.group('value').strip()]
            else:
                m2 = META_MORE_RE.match(line)
                if m2 and key:
                    # Add another line to existing key
                    meta[key].append(m2.group('value').strip())
                else:
                    # Not meta-data: put the line back and stop scanning.
                    lines.insert(0, line)
                    break  # no meta data - done
        self.markdown.Meta = meta
        return lines
def makeExtension(configs=None):
    """Entry point used by markdown to construct the MetaExtension.

    BUGFIX: ``configs`` previously defaulted to a shared mutable ``{}``;
    defaulting to None and building a fresh dict per call avoids state
    leaking across calls. Passing an explicit mapping behaves as before.
    """
    if configs is None:
        configs = {}
    return MetaExtension(configs=configs)
if __name__ == "__main__":
    # Run the module's doctests (the usage examples in the module docstring).
    import doctest
    doctest.testmod()
| apache-2.0 |
feliperfranca/FelipeRFranca_site | django/utils/regex_helper.py | 361 | 12079 | """
Functions for reversing a regular expression (used in reverse URL resolving).
Used internally by Django and not intended for external use.
This is not, and is not intended to be, a complete reg-exp decompiler. It
should be good enough for a large class of URLS, however.
"""
# Mapping of an escape character to a representative of that class. So, e.g.,
# "\w" is replaced by "x" in a reverse URL. A value of None means to ignore
# this sequence. Any missing key is mapped to itself.
ESCAPE_MAPPINGS = {
    "A": None,   # \A start-of-string: zero-width, nothing to emit
    "b": None,   # \b word boundary: zero-width
    "B": None,   # \B non-word-boundary: zero-width
    "d": u"0",   # \d digit -> representative "0"
    "D": u"x",   # \D non-digit
    "s": u" ",   # \s whitespace -> a space
    "S": u"x",   # \S non-whitespace
    "w": u"x",   # \w word character
    "W": u"!",   # \W non-word character
    "Z": None,   # \Z end-of-string: zero-width
}
class Choice(list):
    """
    Used to represent multiple possibilities at this point in a pattern string.
    We use a distinguished type, rather than a list, so that the usage in the
    code is clear.
    """
    # Behaves exactly like a list; the subclass exists only as a type marker.
class Group(list):
    """
    Used to represent a capturing group in the pattern string.
    """
    # Behaves exactly like a list; the subclass exists only as a type marker.
class NonCapture(list):
    """
    Used to represent a non-capturing group in the pattern string.
    """
    # Behaves exactly like a list; the subclass exists only as a type marker.
def normalize(pattern):
"""
Given a reg-exp pattern, normalizes it to a list of forms that suffice for
reverse matching. This does the following:
(1) For any repeating sections, keeps the minimum number of occurrences
permitted (this means zero for optional groups).
(2) If an optional group includes parameters, include one occurrence of
that group (along with the zero occurrence case from step (1)).
(3) Select the first (essentially an arbitrary) element from any character
class. Select an arbitrary character for any unordered class (e.g. '.'
or '\w') in the pattern.
(5) Ignore comments and any of the reg-exp flags that won't change
what we construct ("iLmsu"). "(?x)" is an error, however.
(6) Raise an error on all other non-capturing (?...) forms (e.g.
look-ahead and look-behind matches) and any disjunctive ('|')
constructs.
Django's URLs for forward resolving are either all positional arguments or
all keyword arguments. That is assumed here, as well. Although reverse
resolving can be done using positional args when keyword args are
specified, the two cannot be mixed in the same reverse() call.
"""
# Do a linear scan to work out the special features of this pattern. The
# idea is that we scan once here and collect all the information we need to
# make future decisions.
result = []
non_capturing_groups = []
consume_next = True
pattern_iter = next_char(iter(pattern))
num_args = 0
# A "while" loop is used here because later on we need to be able to peek
# at the next character and possibly go around without consuming another
# one at the top of the loop.
try:
ch, escaped = pattern_iter.next()
except StopIteration:
return zip([u''], [[]])
try:
while True:
if escaped:
result.append(ch)
elif ch == '.':
# Replace "any character" with an arbitrary representative.
result.append(u".")
elif ch == '|':
# FIXME: One day we'll should do this, but not in 1.0.
raise NotImplementedError
elif ch == "^":
pass
elif ch == '$':
break
elif ch == ')':
# This can only be the end of a non-capturing group, since all
# other unescaped parentheses are handled by the grouping
# section later (and the full group is handled there).
#
# We regroup everything inside the capturing group so that it
# can be quantified, if necessary.
start = non_capturing_groups.pop()
inner = NonCapture(result[start:])
result = result[:start] + [inner]
elif ch == '[':
# Replace ranges with the first character in the range.
ch, escaped = pattern_iter.next()
result.append(ch)
ch, escaped = pattern_iter.next()
while escaped or ch != ']':
ch, escaped = pattern_iter.next()
elif ch == '(':
# Some kind of group.
ch, escaped = pattern_iter.next()
if ch != '?' or escaped:
# A positional group
name = "_%d" % num_args
num_args += 1
result.append(Group(((u"%%(%s)s" % name), name)))
walk_to_end(ch, pattern_iter)
else:
ch, escaped = pattern_iter.next()
if ch in "iLmsu#":
# All of these are ignorable. Walk to the end of the
# group.
walk_to_end(ch, pattern_iter)
elif ch == ':':
# Non-capturing group
non_capturing_groups.append(len(result))
elif ch != 'P':
# Anything else, other than a named group, is something
# we cannot reverse.
raise ValueError("Non-reversible reg-exp portion: '(?%s'" % ch)
else:
ch, escaped = pattern_iter.next()
if ch != '<':
raise ValueError("Non-reversible reg-exp portion: '(?P%s'" % ch)
# We are in a named capturing group. Extra the name and
# then skip to the end.
name = []
ch, escaped = pattern_iter.next()
while ch != '>':
name.append(ch)
ch, escaped = pattern_iter.next()
param = ''.join(name)
result.append(Group(((u"%%(%s)s" % param), param)))
walk_to_end(ch, pattern_iter)
elif ch in "*?+{":
# Quanitifers affect the previous item in the result list.
count, ch = get_quantifier(ch, pattern_iter)
if ch:
# We had to look ahead, but it wasn't need to compute the
# quanitifer, so use this character next time around the
# main loop.
consume_next = False
if count == 0:
if contains(result[-1], Group):
# If we are quantifying a capturing group (or
# something containing such a group) and the minimum is
# zero, we must also handle the case of one occurrence
# being present. All the quantifiers (except {0,0},
# which we conveniently ignore) that have a 0 minimum
# also allow a single occurrence.
result[-1] = Choice([None, result[-1]])
else:
result.pop()
elif count > 1:
result.extend([result[-1]] * (count - 1))
else:
# Anything else is a literal.
result.append(ch)
if consume_next:
ch, escaped = pattern_iter.next()
else:
consume_next = True
except StopIteration:
pass
except NotImplementedError:
# A case of using the disjunctive form. No results for you!
return zip([u''], [[]])
return zip(*flatten_result(result))
def next_char(input_iter):
"""
An iterator that yields the next character from "pattern_iter", respecting
escape sequences. An escaped character is replaced by a representative of
its class (e.g. \w -> "x"). If the escaped character is one that is
skipped, it is not returned (the next character is returned instead).
Yields the next character, along with a boolean indicating whether it is a
raw (unescaped) character or not.
"""
for ch in input_iter:
if ch != '\\':
yield ch, False
continue
ch = input_iter.next()
representative = ESCAPE_MAPPINGS.get(ch, ch)
if representative is None:
continue
yield representative, True
def walk_to_end(ch, input_iter):
"""
The iterator is currently inside a capturing group. We want to walk to the
close of this group, skipping over any nested groups and handling escaped
parentheses correctly.
"""
if ch == '(':
nesting = 1
else:
nesting = 0
for ch, escaped in input_iter:
if escaped:
continue
elif ch == '(':
nesting += 1
elif ch == ')':
if not nesting:
return
nesting -= 1
def get_quantifier(ch, input_iter):
"""
Parse a quantifier from the input, where "ch" is the first character in the
quantifier.
Returns the minimum number of occurences permitted by the quantifier and
either None or the next character from the input_iter if the next character
is not part of the quantifier.
"""
if ch in '*?+':
try:
ch2, escaped = input_iter.next()
except StopIteration:
ch2 = None
if ch2 == '?':
ch2 = None
if ch == '+':
return 1, ch2
return 0, ch2
quant = []
while ch != '}':
ch, escaped = input_iter.next()
quant.append(ch)
quant = quant[:-1]
values = ''.join(quant).split(',')
# Consume the trailing '?', if necessary.
try:
ch, escaped = input_iter.next()
except StopIteration:
ch = None
if ch == '?':
ch = None
return int(values[0]), ch
def contains(source, inst):
"""
Returns True if the "source" contains an instance of "inst". False,
otherwise.
"""
if isinstance(source, inst):
return True
if isinstance(source, NonCapture):
for elt in source:
if contains(elt, inst):
return True
return False
def flatten_result(source):
"""
Turns the given source sequence into a list of reg-exp possibilities and
their arguments. Returns a list of strings and a list of argument lists.
Each of the two lists will be of the same length.
"""
if source is None:
return [u''], [[]]
if isinstance(source, Group):
if source[1] is None:
params = []
else:
params = [source[1]]
return [source[0]], [params]
result = [u'']
result_args = [[]]
pos = last = 0
for pos, elt in enumerate(source):
if isinstance(elt, basestring):
continue
piece = u''.join(source[last:pos])
if isinstance(elt, Group):
piece += elt[0]
param = elt[1]
else:
param = None
last = pos + 1
for i in range(len(result)):
result[i] += piece
if param:
result_args[i].append(param)
if isinstance(elt, (Choice, NonCapture)):
if isinstance(elt, NonCapture):
elt = [elt]
inner_result, inner_args = [], []
for item in elt:
res, args = flatten_result(item)
inner_result.extend(res)
inner_args.extend(args)
new_result = []
new_args = []
for item, args in zip(result, result_args):
for i_item, i_args in zip(inner_result, inner_args):
new_result.append(item + i_item)
new_args.append(args[:] + i_args)
result = new_result
result_args = new_args
if pos >= last:
piece = u''.join(source[last:])
for i in range(len(result)):
result[i] += piece
return result, result_args
| bsd-3-clause |
EvanK/ansible | lib/ansible/plugins/action/win_updates.py | 29 | 11371 | from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.parsing.yaml.objects import AnsibleUnicode
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionBase):
DEFAULT_REBOOT_TIMEOUT = 1200
def _run_win_updates(self, module_args, task_vars, use_task):
display.vvv("win_updates: running win_updates module")
wrap_async = self._task.async_val
result = self._execute_module_with_become(module_name='win_updates',
module_args=module_args,
task_vars=task_vars,
wrap_async=wrap_async,
use_task=use_task)
return result
def _reboot_server(self, task_vars, reboot_timeout, use_task):
display.vvv("win_updates: rebooting remote host after update install")
reboot_args = {
'reboot_timeout': reboot_timeout
}
reboot_result = self._run_action_plugin('win_reboot', task_vars,
module_args=reboot_args)
if reboot_result.get('failed', False):
raise AnsibleError(reboot_result['msg'])
# only run this if the user has specified we can only use scheduled
# tasks, the win_shell command requires become and will be skipped if
# become isn't available to use
if use_task:
display.vvv("win_updates: skipping WUA is not busy check as "
"use_scheduled_task=True is set")
else:
display.vvv("win_updates: checking WUA is not busy with win_shell "
"command")
# While this always returns False after a reboot it doesn't return
# a value until Windows is actually ready and finished installing
# updates. This needs to run with become as WUA doesn't work over
# WinRM, ignore connection errors as another reboot can happen
command = "(New-Object -ComObject Microsoft.Update.Session)." \
"CreateUpdateInstaller().IsBusy"
shell_module_args = {
'_raw_params': command
}
try:
shell_result = self._execute_module_with_become(
module_name='win_shell', module_args=shell_module_args,
task_vars=task_vars, wrap_async=False, use_task=use_task
)
display.vvv("win_updates: shell wait results: %s"
% json.dumps(shell_result))
except Exception as exc:
display.debug("win_updates: Fatal error when running shell "
"command, attempting to recover: %s" % to_text(exc))
display.vvv("win_updates: ensure the connection is up and running")
# in case Windows needs to reboot again after the updates, we wait for
# the connection to be stable again
wait_for_result = self._run_action_plugin('wait_for_connection',
task_vars)
if wait_for_result.get('failed', False):
raise AnsibleError(wait_for_result['msg'])
def _run_action_plugin(self, plugin_name, task_vars, module_args=None):
# Create new task object and reset the args
new_task = self._task.copy()
new_task.args = {}
if module_args is not None:
for key, value in module_args.items():
new_task.args[key] = value
# run the action plugin and return the results
action = self._shared_loader_obj.action_loader.get(
plugin_name,
task=new_task,
connection=self._connection,
play_context=self._play_context,
loader=self._loader,
templar=self._templar,
shared_loader_obj=self._shared_loader_obj
)
return action.run(task_vars=task_vars)
def _merge_dict(self, original, new):
dict_var = original.copy()
dict_var.update(new)
return dict_var
def _execute_module_with_become(self, module_name, module_args, task_vars,
wrap_async, use_task):
orig_become = self._play_context.become
orig_become_method = self._play_context.become_method
orig_become_user = self._play_context.become_user\
if not use_task:
if orig_become is None or orig_become is False:
self._play_context.become = True
if orig_become_method != 'runas':
self._play_context.become_method = 'runas'
if orig_become_user is None or orig_become_user == 'root':
self._play_context.become_user = 'SYSTEM'
try:
module_res = self._execute_module(module_name=module_name,
module_args=module_args,
task_vars=task_vars,
wrap_async=wrap_async)
finally:
self._play_context.become = orig_become
self._play_context.become_method = orig_become_method
self._play_context.become_user = orig_become_user
return module_res
def run(self, tmp=None, task_vars=None):
self._supports_check_mode = True
self._supports_async = True
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
state = self._task.args.get('state', 'installed')
reboot = self._task.args.get('reboot', False)
reboot_timeout = self._task.args.get('reboot_timeout',
self.DEFAULT_REBOOT_TIMEOUT)
use_task = boolean(self._task.args.get('use_scheduled_task', False),
strict=False)
if state not in ['installed', 'searched']:
result['failed'] = True
result['msg'] = "state must be either installed or searched"
return result
try:
reboot = boolean(reboot)
except TypeError as exc:
result['failed'] = True
result['msg'] = "cannot parse reboot as a boolean: %s" % to_text(exc)
return result
if not isinstance(reboot_timeout, int):
result['failed'] = True
result['msg'] = "reboot_timeout must be an integer"
return result
if reboot and self._task.async_val > 0:
result['failed'] = True
result['msg'] = "async is not supported for this task when " \
"reboot=yes"
return result
# Run the module
new_module_args = self._task.args.copy()
new_module_args.pop('reboot', None)
new_module_args.pop('reboot_timeout', None)
result = self._run_win_updates(new_module_args, task_vars, use_task)
# if the module failed to run at all then changed won't be populated
# so we just return the result as is
# https://github.com/ansible/ansible/issues/38232
failed = result.get('failed', False)
if ("updates" not in result.keys() and self._task.async_val == 0) or failed:
result['failed'] = True
return result
changed = result.get('changed', False)
updates = result.get('updates', dict())
filtered_updates = result.get('filtered_updates', dict())
found_update_count = result.get('found_update_count', 0)
installed_update_count = result.get('installed_update_count', 0)
# Handle automatic reboots if the reboot flag is set
if reboot and state == 'installed' and not \
self._play_context.check_mode:
previously_errored = False
while result['installed_update_count'] > 0 or \
result['found_update_count'] > 0 or \
result['reboot_required'] is True:
display.vvv("win_updates: check win_updates results for "
"automatic reboot: %s" % json.dumps(result))
# check if the module failed, break from the loop if it
# previously failed and return error to the user
if result.get('failed', False):
if previously_errored:
break
previously_errored = True
else:
previously_errored = False
reboot_error = None
# check if a reboot was required before installing the updates
if result.get('msg', '') == "A reboot is required before " \
"more updates can be installed":
reboot_error = "reboot was required before more updates " \
"can be installed"
if result.get('reboot_required', False):
if reboot_error is None:
reboot_error = "reboot was required to finalise " \
"update install"
try:
changed = True
self._reboot_server(task_vars, reboot_timeout,
use_task)
except AnsibleError as exc:
result['failed'] = True
result['msg'] = "Failed to reboot remote host when " \
"%s: %s" \
% (reboot_error, to_text(exc))
break
result.pop('msg', None)
# rerun the win_updates module after the reboot is complete
result = self._run_win_updates(new_module_args, task_vars,
use_task)
if result.get('failed', False):
return result
result_updates = result.get('updates', dict())
result_filtered_updates = result.get('filtered_updates', dict())
updates = self._merge_dict(updates, result_updates)
filtered_updates = self._merge_dict(filtered_updates,
result_filtered_updates)
found_update_count += result.get('found_update_count', 0)
installed_update_count += result.get('installed_update_count', 0)
if result['changed']:
changed = True
# finally create the return dict based on the aggregated execution
# values if we are not in async
if self._task.async_val == 0:
result['changed'] = changed
result['updates'] = updates
result['filtered_updates'] = filtered_updates
result['found_update_count'] = found_update_count
result['installed_update_count'] = installed_update_count
return result
| gpl-3.0 |
sgraham/nope | third_party/WebKit/Tools/Scripts/webkitpy/common/config/irc.py | 69 | 1467 | # Copyright (c) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
server = "irc.freenode.net"
port = 6667
channel = "#blink"
nickname = "commit-bot"
update_wait_seconds = 10
retry_attempts = 8
| bsd-3-clause |
Pathfinderdev/Pathfinder | share/qt/extract_strings_qt.py | 1294 | 1784 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
# Collect the C++ sources/headers to scan for translatable _("...") strings.
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')

# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()

messages = parse_po(out)

# Write the collected msgids as QT_TRANSLATE_NOOP entries so that Qt
# linguist can pick them up.
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {')
for (msgid, msgstr) in messages:
    if msgid != EMPTY:
        f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
| mit |
liuyxpp/blohg | blohg/ext.py | 2 | 5784 | # -*- coding: utf-8 -*-
"""
blohg.ext
~~~~~~~~~
Blohg support for 3rd-party extensions.
:copyright: (c) 2010-2013 by Rafael Goncalves Martins
:license: GPL-2, see LICENSE for more details.
"""
from flask import Blueprint
from flask.ctx import _app_ctx_stack
from flask.globals import current_app
from flask.helpers import locked_cached_property
from imp import new_module
from jinja2.loaders import FileSystemLoader
from blohg.static import BlohgStaticFile
from blohg.templating import BlohgLoader
import os
import posixpath
import sys
class BlohgBlueprint(Blueprint):
@locked_cached_property
def jinja_loader(self):
if self.template_folder is not None:
if ':repo:' in self.root_path: # just load from repo
root_path = self.root_path[self.root_path.find(':repo:') + 6:]
return BlohgLoader(posixpath.join(root_path,
self.template_folder))
return FileSystemLoader(os.path.join(self.root_path,
self.template_folder))
def register(self, app, options, first_registration=False):
def register_static(state):
self.repo_static_folder = None
if self.has_static_folder:
if ':repo:' in self.root_path: # just load from repo
static_folder = self.static_folder[
self.static_folder.find(':repo:') + 6:]
endpoint = '%s.static' % state.blueprint.name
if endpoint not in state.app.view_functions:
raise RuntimeError('Static endpoint not registered yet '
' for %s!' % state.blueprint.name)
state.app.view_functions[endpoint] = \
BlohgStaticFile(static_folder)
self.repo_static_folder = static_folder
self.record(register_static)
return Blueprint.register(self, app, options, first_registration)
class BlohgExtension(object):
def __init__(self, import_name):
self.import_name = import_name
# id is used as prefix for most of the extension-related stuff naming
self.ext_id = self.import_name.replace('.', '_')
self._callbacks = []
self._register_extension()
@property
def g(self):
key = '_%s_globals' % self.ext_id
if not hasattr(current_app, key):
setattr(current_app, key, current_app.app_ctx_globals_class())
return getattr(current_app, key)
def setup_extension(self, f):
self._callbacks.append(f)
return f
def _register_extension(self):
ctx = _app_ctx_stack.top
if ctx is not None:
if not hasattr(ctx, 'extension_registry'):
ctx.extension_registry = []
ctx.extension_registry.append(self)
return
raise RuntimeError('Failed to initialize extension registry.')
def _load_extension(self, app):
for callback in self._callbacks:
if callable(callback):
callback(app)
class ExtensionImporter(object):
"""Loader and Finder to import Python plugins from the Mercurial
repository. Mostly based on:
https://github.com/mitsuhiko/flask/blob/master/flask/exthook.py
See PEP 302 for details.
"""
def __init__(self, changectx, ext_dir):
self.changectx = changectx
self.ext_dir = ext_dir
def __eq__(self, other):
return self.__class__.__module__ == other.__class__.__module__ and \
self.__class__.__name__ == other.__class__.__name__
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def new(cls, *args, **kwargs):
obj = cls(*args, **kwargs)
sys.meta_path[:] = [x for x in sys.meta_path if obj != x] + [obj]
return obj
def module_file(self, fullname):
fullname = fullname.replace('.', posixpath.sep)
for path in [posixpath.join(self.ext_dir, fullname + i) \
for i in [posixpath.sep + '__init__.py', '.py']]:
if path in self.changectx.files:
return path
def find_module(self, fullname, path=None):
if not fullname.startswith('blohg_'): # ...starting with blohg_
return
if self.module_file(fullname) is not None:
return self
def load_module(self, fullname):
mod = sys.modules.setdefault(fullname, new_module(fullname))
mod.__file__ = self.get_filename(fullname)
mod.__loader__ = self
if self.is_package(fullname):
mod.__path__ = [mod.__file__.rsplit(posixpath.sep, 1)[0]]
mod.__package__ = fullname
else:
mod.__package__ = fullname.rpartition('.')[0]
exec(self.get_code(fullname), mod.__dict__)
return mod
def get_fctx(self, fullname):
filename = self.module_file(fullname)
if filename is None:
raise ImportError('Module not found: %s' % fullname)
return self.changectx.get_filectx(filename)
def is_package(self, fullname):
filename = self.get_filename(fullname)
return filename.endswith(posixpath.sep + '__init__.py')
def get_code(self, fullname):
return compile(self.get_source(fullname), self.get_filename(fullname),
'exec')
def get_source(self, fullname):
return self.get_fctx(fullname).data
def get_filename(self, fullname):
filename = self.module_file(fullname)
if filename is None:
raise ImportError('Module not found: %s' % fullname)
return ':repo:%s' % filename # :repo: is a placeholder
| gpl-2.0 |
Valloric/ycmd | ycmd/tests/clang/include_cache_test.py | 5 | 6642 | # Copyright (C) 2017 Davit Samvelyan davitsamvelyan@gmail.com
# Synopsys.
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Not installing aliases from python-future; it's unreliable and slow.
from builtins import * # noqa
import os
from time import sleep
from nose.tools import eq_
from hamcrest import ( assert_that,
contains,
contains_inanyorder,
equal_to,
has_entries,
has_entry,
has_properties,
not_ )
from ycmd.completers.cpp.include_cache import IncludeCache
from ycmd.tests.clang import PathToTestFile
from ycmd.tests.test_utils import TemporaryTestDir
def IncludeCache_NotCached_DirInaccessible_test():
include_cache = IncludeCache()
eq_( include_cache._cache, {} )
includes = include_cache.GetIncludes( PathToTestFile( 'unknown_dir' ) )
eq_( includes, [] )
eq_( include_cache._cache, {} )
def IncludeCache_NotCached_DirAccessible_test():
include_cache = IncludeCache()
eq_( include_cache._cache, {} )
includes = include_cache.GetIncludes( PathToTestFile( 'cache_test' ) )
mtime = os.path.getmtime( PathToTestFile( 'cache_test' ) )
assert_that( includes, contains( has_properties( {
'name': 'foo.h',
'entry_type': 1
} ) ) )
assert_that( include_cache._cache,
has_entry( PathToTestFile( 'cache_test' ),
has_entries( { 'mtime': mtime,
'includes': contains( has_properties( {
'name': 'foo.h',
'entry_type': 1
} ) ) } ) ) )
def IncludeCache_Cached_NoNewMtime_test():
include_cache = IncludeCache()
eq_( include_cache._cache, {} )
old_includes = include_cache.GetIncludes( PathToTestFile( 'cache_test' ) )
old_mtime = os.path.getmtime( PathToTestFile( 'cache_test' ) )
assert_that( old_includes, contains( has_properties( {
'name': 'foo.h',
'entry_type': 1
} ) ) )
assert_that( include_cache._cache,
has_entry( PathToTestFile( 'cache_test' ),
has_entries( { 'mtime': old_mtime,
'includes': contains( has_properties( {
'name': 'foo.h',
'entry_type': 1
} ) ) } ) ) )
new_includes = include_cache.GetIncludes( PathToTestFile( 'cache_test' ) )
new_mtime = os.path.getmtime( PathToTestFile( 'cache_test' ) )
eq_( new_mtime, old_mtime )
assert_that( new_includes, contains( has_properties( {
'name': 'foo.h',
'entry_type': 1
} ) ) )
assert_that( include_cache._cache,
has_entry( PathToTestFile( 'cache_test' ),
has_entries( { 'mtime': new_mtime,
'includes': contains( has_properties( {
'name': 'foo.h',
'entry_type': 1
} ) ) } ) ) )
def IncludeCache_Cached_NewMtime_test():
with TemporaryTestDir() as tmp_dir:
include_cache = IncludeCache()
eq_( include_cache._cache, {} )
foo_path = os.path.join( tmp_dir, 'foo' )
with open( foo_path, 'w' ) as foo_file:
foo_file.write( 'foo' )
old_includes = include_cache.GetIncludes( tmp_dir )
old_mtime = os.path.getmtime( tmp_dir )
assert_that( old_includes, contains( has_properties( {
'name': 'foo',
'entry_type': 1
} ) ) )
assert_that( include_cache._cache,
has_entry( tmp_dir,
has_entries( {
'mtime': old_mtime,
'includes': contains( has_properties( {
'name': 'foo',
'entry_type': 1
} ) )
} ) ) )
sleep( 2 )
bar_path = os.path.join( tmp_dir, 'bar' )
with open( bar_path, 'w' ) as bar_file:
bar_file.write( 'bar' )
new_includes = include_cache.GetIncludes( tmp_dir )
new_mtime = os.path.getmtime( tmp_dir )
assert_that( old_mtime, not_( equal_to( new_mtime ) ) )
assert_that( new_includes, contains_inanyorder(
has_properties( {
'name': 'foo',
'entry_type': 1
} ),
has_properties( {
'name': 'bar',
'entry_type': 1
} )
) )
assert_that( include_cache._cache,
has_entry( tmp_dir, has_entries( {
'mtime': new_mtime,
'includes': contains_inanyorder(
has_properties( {
'name': 'foo',
'entry_type': 1
} ),
has_properties( {
'name': 'bar',
'entry_type': 1
} ) )
} ) ) )
| gpl-3.0 |
openembedded/openembedded | contrib/qa/bugzilla.py | 32 | 140512 | #
# BugZilla query page scanner to work with ancient
# Debian Stable bugzilla installationss
#
# This includes three test sites
# site contains one bug entry
# all_bugs contains all Openmoko bugs as of \today
# no_bug is a query which showed no bug
#
from HTMLParser import HTMLParser
class BugQueryExtractor(HTMLParser):
STATE_NONE = 0
STATE_FOUND_TR = 1
STATE_FOUND_NUMBER = 2
STATE_FOUND_PRIO = 3
STATE_FOUND_PRIO2 = 4
STATE_FOUND_NAME = 5
STATE_FOUND_PLATFORM = 6
STATE_FOUND_STATUS = 7
STATE_FOUND_WHATEVER = 8 # I don't know this field
STATE_FOUND_DESCRIPTION =9
def __init__(self):
HTMLParser.__init__(self)
self.state = self.STATE_NONE
self.bug = None
self.bugs = []
def handle_starttag(self, tag, attr):
if self.state == self.STATE_NONE and tag.lower() == "tr":
# check for bz_normal and bz_P2 as indicator in buglist.cgi
# use 'all' and 'map' on python2.5
if len(attr) == 1 and attr[0][0] == 'class' and \
('bz_normal' in attr[0][1] or 'bz_blocker' in attr[0][1] or 'bz_enhancement' in attr[0][1] or 'bz_major' in attr[0][1] or 'bz_minor' in attr[0][1] or 'bz_trivial' in attr[0][1] or 'bz_critical' in attr[0][1] or 'bz_wishlist' in attr[0][1]) \
and 'bz_P' in attr[0][1]:
print "Found tr %s %s" % (tag, attr)
self.state = self.STATE_FOUND_TR
elif self.state == self.STATE_FOUND_TR and tag.lower() == "td":
self.state += 1
def handle_endtag(self, tag):
if tag.lower() == "tr":
print "Going back"
if self.state != self.STATE_NONE:
self.bugs.append( (self.bug,self.status) )
self.state = self.STATE_NONE
self.bug = None
if self.state > 1 and tag.lower() == "td":
print "Next TD"
self.state += 1
def handle_data(self,data):
data = data.strip()
# skip garbage
if len(data) == 0:
return
if self.state == self.STATE_FOUND_NUMBER:
"""
#1995 in bugs.oe.org has [SEC] additionally to the number and we want to ignore it
"""
print "Bug Number '%s'" % data.strip()
if self.bug:
print "Ignoring bug data"
return
self.bug = data
elif self.state == self.STATE_FOUND_STATUS:
print "Status Name '%s'" % data.strip()
self.status = data
def result(self):
print "Found bugs"
return self.bugs
#
bugs_openmoko = """<!-- 1.0@bugzilla.org -->
<!-- 1.0@bugzilla.org -->
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>Bug List</title>
<link href="/style/style.css" rel="stylesheet" type="text/css" />
<link href="/bugzilla/css/buglist.css" rel="stylesheet" type="text/css">
</head>
<body bgcolor="#FFFFFF" onload="">
<!-- 1.0@bugzilla.org -->
<div id="header">
<a href="http://bugzilla.openmoko.org/cgi-bin/bugzilla/" id="site_logo"><img src="/style/images/openmoko_logo.png" alt="openmoko.org" /></a>
<div id="main_navigation">
<ul>
<li><a href="http://www.openmoko.org/" class="nav_home"><span>Home</span></a></li>
<li><a href="http://wiki.openmoko.org/" class="nav_wiki"><span>Wiki</span></a></li>
<li><a href="http://bugzilla.openmoko.org/" class="nav_bugzilla selected"><span>Bugzilla</span></a></li>
<li><a href="http://planet.openmoko.org/" class="nav_planet"><span>Planet</span></a></li>
<li><a href="http://projects.openmoko.org/" class="nav_projects"><span>Projects</span></a></li>
<li><a href="http://lists.openmoko.org/" class="nav_lists"><span>Lists</span></a></li>
</ul>
</div>
</div>
<div class="page_title">
<strong>Bug List</strong>
</div>
<div class="container">
<div align="center">
<b>Fri Mar 16 20:51:52 CET 2007</b><br>
<a href="quips.cgi"><i>It was a time of great struggle and heroic deeds
</i></a>
</div>
<hr>
282 bugs found.
<!-- 1.0@bugzilla.org -->
<table class="bz_buglist" cellspacing="0" cellpadding="4" width="100%">
<colgroup>
<col class="bz_id_column">
<col class="bz_severity_column">
<col class="bz_priority_column">
<col class="bz_platform_column">
<col class="bz_owner_column">
<col class="bz_status_column">
<col class="bz_resolution_column">
<col class="bz_summary_column">
</colgroup>
<tr align="left">
<th colspan="1">
<a href="buglist.cgi?short_desc_type=allwordssubstr&short_desc=&long_desc_type=allwordssubstr&long_desc=&bug_file_loc_type=allwordssubstr&bug_file_loc=&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&emailassigned_to1=1&emailtype1=substring&email1=&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailtype2=substring&email2=&bugidtype=include&bug_id=&votes=&changedin=&chfieldfrom=&chfieldto=Now&chfieldvalue=&field0-0-0=noop&type0-0-0=noop&value0-0-0=&order=bugs.bug_id">ID</a>
</th>
<th colspan="1">
<a href="buglist.cgi?short_desc_type=allwordssubstr&short_desc=&long_desc_type=allwordssubstr&long_desc=&bug_file_loc_type=allwordssubstr&bug_file_loc=&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&emailassigned_to1=1&emailtype1=substring&email1=&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailtype2=substring&email2=&bugidtype=include&bug_id=&votes=&changedin=&chfieldfrom=&chfieldto=Now&chfieldvalue=&field0-0-0=noop&type0-0-0=noop&value0-0-0=&order=bugs.bug_severity,bugs.bug_id">Sev</a>
</th><th colspan="1">
<a href="buglist.cgi?short_desc_type=allwordssubstr&short_desc=&long_desc_type=allwordssubstr&long_desc=&bug_file_loc_type=allwordssubstr&bug_file_loc=&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&emailassigned_to1=1&emailtype1=substring&email1=&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailtype2=substring&email2=&bugidtype=include&bug_id=&votes=&changedin=&chfieldfrom=&chfieldto=Now&chfieldvalue=&field0-0-0=noop&type0-0-0=noop&value0-0-0=&order=bugs.priority,bugs.bug_id">Pri</a>
</th><th colspan="1">
<a href="buglist.cgi?short_desc_type=allwordssubstr&short_desc=&long_desc_type=allwordssubstr&long_desc=&bug_file_loc_type=allwordssubstr&bug_file_loc=&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&emailassigned_to1=1&emailtype1=substring&email1=&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailtype2=substring&email2=&bugidtype=include&bug_id=&votes=&changedin=&chfieldfrom=&chfieldto=Now&chfieldvalue=&field0-0-0=noop&type0-0-0=noop&value0-0-0=&order=bugs.rep_platform,bugs.bug_id">Plt</a>
</th><th colspan="1">
<a href="buglist.cgi?short_desc_type=allwordssubstr&short_desc=&long_desc_type=allwordssubstr&long_desc=&bug_file_loc_type=allwordssubstr&bug_file_loc=&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&emailassigned_to1=1&emailtype1=substring&email1=&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailtype2=substring&email2=&bugidtype=include&bug_id=&votes=&changedin=&chfieldfrom=&chfieldto=Now&chfieldvalue=&field0-0-0=noop&type0-0-0=noop&value0-0-0=&order=map_assigned_to.login_name,bugs.bug_id">Owner</a>
</th><th colspan="1">
<a href="buglist.cgi?short_desc_type=allwordssubstr&short_desc=&long_desc_type=allwordssubstr&long_desc=&bug_file_loc_type=allwordssubstr&bug_file_loc=&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&emailassigned_to1=1&emailtype1=substring&email1=&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailtype2=substring&email2=&bugidtype=include&bug_id=&votes=&changedin=&chfieldfrom=&chfieldto=Now&chfieldvalue=&field0-0-0=noop&type0-0-0=noop&value0-0-0=&order=bugs.bug_status,bugs.bug_id">State</a>
</th><th colspan="1">
<a href="buglist.cgi?short_desc_type=allwordssubstr&short_desc=&long_desc_type=allwordssubstr&long_desc=&bug_file_loc_type=allwordssubstr&bug_file_loc=&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&emailassigned_to1=1&emailtype1=substring&email1=&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailtype2=substring&email2=&bugidtype=include&bug_id=&votes=&changedin=&chfieldfrom=&chfieldto=Now&chfieldvalue=&field0-0-0=noop&type0-0-0=noop&value0-0-0=&order=bugs.resolution,bugs.bug_id">Result</a>
</th><th colspan="1">
<a href="buglist.cgi?short_desc_type=allwordssubstr&short_desc=&long_desc_type=allwordssubstr&long_desc=&bug_file_loc_type=allwordssubstr&bug_file_loc=&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&emailassigned_to1=1&emailtype1=substring&email1=&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailtype2=substring&email2=&bugidtype=include&bug_id=&votes=&changedin=&chfieldfrom=&chfieldto=Now&chfieldvalue=&field0-0-0=noop&type0-0-0=noop&value0-0-0=&order=bugs.short_desc,bugs.bug_id">Summary</a>
</th>
</tr>
<tr class="bz_critical bz_P2 ">
<td>
<a href="show_bug.cgi?id=1">1</a>
</td>
<td><nobr>cri</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>CLOS</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>kernel is running way too slow
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=2">2</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>SD card driver unstable
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=3">3</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>sean_chiang@fic.com.tw</nobr>
</td>
<td><nobr>CLOS</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>Debug Board trying to control GSM_EN / FA_19
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=4">4</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>random crashes of gsmd
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=5">5</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>call progress information is lacking
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=6">6</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>sean_chiang@fic.com.tw</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>GSM_EN should be called nGSM_EN
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=7">7</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>CLOS</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>PMU RTC driver date/time conversion is erroneous
</td>
</tr>
<tr class="bz_critical bz_P5 ">
<td>
<a href="show_bug.cgi?id=8">8</a>
</td>
<td><nobr>cri</nobr>
</td>
<td><nobr>P5</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>SD/MMC: Card sometimes not detected
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=9">9</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>ASSI</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Boot speed too low (kernel part)
</td>
</tr>
<tr class="bz_enhancement bz_P2 ">
<td>
<a href="show_bug.cgi?id=10">10</a>
</td>
<td><nobr>enh</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>CLOS</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>u-boot support for usb-serial lacking
</td>
</tr>
<tr class="bz_blocker bz_P2 ">
<td>
<a href="show_bug.cgi?id=11">11</a>
</td>
<td><nobr>blo</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>ken_zhao@fic-sh.com.cn</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>u-boot lacks USB DFU support
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=12">12</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>gordon_hsu@fic-sh.com.cn</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Boot speed too low (bootloader part)
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=13">13</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>teddy@fic-sh.com.cn</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>power button should not immediately react
</td>
</tr>
<tr class="bz_minor bz_P2 ">
<td>
<a href="show_bug.cgi?id=14">14</a>
</td>
<td><nobr>min</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>werner@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>bootloader should display startup image before booting th...
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=15">15</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>kernel oops when unloading g_ether
</td>
</tr>
<tr class="bz_blocker bz_P2 ">
<td>
<a href="show_bug.cgi?id=16">16</a>
</td>
<td><nobr>blo</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>sean_chiang@fic.com.tw</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>bluetooth pullup / pulldown resistors
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=17">17</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>sean_chiang@fic.com.tw</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>microSD socket still has mechanical contact problems
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=18">18</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>OE build of u_boot with CVSDATE 20061030 uses latest git ...
</td>
</tr>
<tr class="bz_minor bz_P2 ">
<td>
<a href="show_bug.cgi?id=19">19</a>
</td>
<td><nobr>min</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>teddy@fic-sh.com.cn</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>"reboot" doesn't work
</td>
</tr>
<tr class="bz_critical bz_P2 ">
<td>
<a href="show_bug.cgi?id=20">20</a>
</td>
<td><nobr>cri</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>connection status
</td>
</tr>
<tr class="bz_blocker bz_P3 ">
<td>
<a href="show_bug.cgi?id=21">21</a>
</td>
<td><nobr>blo</nobr>
</td>
<td><nobr>P3</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>sean_chiang@fic.com.tw</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>sms function missing
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=22">22</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>outgoing call generates 'segmentation fault' when the pee...
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=23">23</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>dtmf support not available now
</td>
</tr>
<tr class="bz_wishlist bz_P2 ">
<td>
<a href="show_bug.cgi?id=24">24</a>
</td>
<td><nobr>wis</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>libgsmd/misc.h: lgsm_get_signal_quality()
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=25">25</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>davewu01@seed.net.tw</nobr>
</td>
<td><nobr>ASSI</nobr>
</td>
<td><nobr></nobr>
</td>
<td>GtkSpinBox unfinished
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=26">26</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>ken_zhao@fic-sh.com.cn</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Pixmap Engine and Shadows
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=27">27</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>ken_zhao@fic-sh.com.cn</nobr>
</td>
<td><nobr>ASSI</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Labels on GtkButton don't appear centered
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=28">28</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>ken_zhao@fic-sh.com.cn</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>GtkComboBox styling woes
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=29">29</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>ken_zhao@fic-sh.com.cn</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>GtkProgressBar styling woes
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=30">30</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>REOP</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Touchscreen emits bogus events under X
</td>
</tr>
<tr class="bz_critical bz_P2 ">
<td>
<a href="show_bug.cgi?id=31">31</a>
</td>
<td><nobr>cri</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Display colors are slightly off
</td>
</tr>
<tr class="bz_enhancement bz_P2 ">
<td>
<a href="show_bug.cgi?id=32">32</a>
</td>
<td><nobr>enh</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Common function for loading GdkPixbuf
</td>
</tr>
<tr class="bz_blocker bz_P2 ">
<td>
<a href="show_bug.cgi?id=33">33</a>
</td>
<td><nobr>blo</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>incoming call status report causes gsmd to crash.
</td>
</tr>
<tr class="bz_blocker bz_P2 ">
<td>
<a href="show_bug.cgi?id=34">34</a>
</td>
<td><nobr>blo</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>WORK</nobr>
</td>
<td>Need to decide if lgsm_handle is still valid.
</td>
</tr>
<tr class="bz_enhancement bz_P5 ">
<td>
<a href="show_bug.cgi?id=35">35</a>
</td>
<td><nobr>enh</nobr>
</td>
<td><nobr>P5</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>WONT</nobr>
</td>
<td>Support debug board from u-boot
</td>
</tr>
<tr class="bz_blocker bz_P2 ">
<td>
<a href="show_bug.cgi?id=36">36</a>
</td>
<td><nobr>blo</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>Implement s3c2410 udc (usb device controller) driver in u...
</td>
</tr>
<tr class="bz_blocker bz_P2 ">
<td>
<a href="show_bug.cgi?id=37">37</a>
</td>
<td><nobr>blo</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>DUPL</nobr>
</td>
<td>Implement USB Device Firmware Upgrade (DFU)
</td>
</tr>
<tr class="bz_enhancement bz_P2 ">
<td>
<a href="show_bug.cgi?id=38">38</a>
</td>
<td><nobr>enh</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>DUPL</nobr>
</td>
<td>implement USB serial emulation in u-boot
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=39">39</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>gordon_hsu@fic-sh.com.cn</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>Move LCM initialization into u-boot (currently in kernel ...
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=40">40</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>werner@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>DUPL</nobr>
</td>
<td>test + debug display of image on LCM in u-boot
</td>
</tr>
<tr class="bz_enhancement bz_P2 ">
<td>
<a href="show_bug.cgi?id=41">41</a>
</td>
<td><nobr>enh</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>ken_zhao@fic-sh.com.cn</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>evaluate sapwood theme engine
</td>
</tr>
<tr class="bz_blocker bz_P3 ">
<td>
<a href="show_bug.cgi?id=42">42</a>
</td>
<td><nobr>blo</nobr>
</td>
<td><nobr>P3</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>dynamic mtd partition table cration
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=43">43</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>StatusBar (Footer) API
</td>
</tr>
<tr class="bz_wishlist bz_P2 ">
<td>
<a href="show_bug.cgi?id=44">44</a>
</td>
<td><nobr>wis</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>InputMethod API
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=45">45</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Automatic opening input methods
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=46">46</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>266MHz initialization of GTA01Bv2
</td>
</tr>
<tr class="bz_minor bz_P2 ">
<td>
<a href="show_bug.cgi?id=47">47</a>
</td>
<td><nobr>min</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>ken_zhao@fic-sh.com.cn</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>DUPL</nobr>
</td>
<td>Evaluate sapwood theming engine
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=48">48</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>DUPL</nobr>
</td>
<td>Only power up the phone in case power button was pressed ...
</td>
</tr>
<tr class="bz_minor bz_P2 ">
<td>
<a href="show_bug.cgi?id=49">49</a>
</td>
<td><nobr>min</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Implement touchscreen & click daemon
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=50">50</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Sound Event API
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=51">51</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Preferences API
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=52">52</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>cj_steven@fic-sh.com.cn</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>Single Instance Startup
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=53">53</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>tonyguan@fic-sh.com.cn</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>DTMF tones during call
</td>
</tr>
<tr class="bz_blocker bz_P1 ">
<td>
<a href="show_bug.cgi?id=54">54</a>
</td>
<td><nobr>blo</nobr>
</td>
<td><nobr>P1</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>tonyguan@fic-sh.com.cn</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>PIN Entry
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=55">55</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>tonyguan@fic-sh.com.cn</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>Don't pop up the dialer interface initially
</td>
</tr>
<tr class="bz_blocker bz_P4 ">
<td>
<a href="show_bug.cgi?id=56">56</a>
</td>
<td><nobr>blo</nobr>
</td>
<td><nobr>P4</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>tonyguan@fic-sh.com.cn</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>Integrate with contacts database
</td>
</tr>
<tr class="bz_minor bz_P2 ">
<td>
<a href="show_bug.cgi?id=57">57</a>
</td>
<td><nobr>min</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>tonyguan@fic-sh.com.cn</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>LATE</nobr>
</td>
<td>Recording Calls
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=58">58</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>API for devmand
</td>
</tr>
<tr class="bz_enhancement bz_P2 ">
<td>
<a href="show_bug.cgi?id=59">59</a>
</td>
<td><nobr>enh</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>ken_zhao@fic-sh.com.cn</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Real DPI vs. Fake DPI
</td>
</tr>
<tr class="bz_minor bz_P2 ">
<td>
<a href="show_bug.cgi?id=60">60</a>
</td>
<td><nobr>min</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>ken_zhao@fic-sh.com.cn</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>fontconfig antialiasing
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=61">61</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>ken_zhao@fic-sh.com.cn</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Theme is very slow
</td>
</tr>
<tr class="bz_wishlist bz_P2 ">
<td>
<a href="show_bug.cgi?id=62">62</a>
</td>
<td><nobr>wis</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>High Level Multi Layer Network Discovery API
</td>
</tr>
<tr class="bz_enhancement bz_P2 ">
<td>
<a href="show_bug.cgi?id=63">63</a>
</td>
<td><nobr>enh</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>ASSI</nobr>
</td>
<td><nobr></nobr>
</td>
<td>matchbox-panel 1 vs. 2
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=64">64</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Show Cipher Status in GSM-Panel applet
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=65">65</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Visual indication for SMS overflow
</td>
</tr>
<tr class="bz_critical bz_P2 ">
<td>
<a href="show_bug.cgi?id=66">66</a>
</td>
<td><nobr>cri</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Applet for Missed Events
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=67">67</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>WONT</nobr>
</td>
<td>libmokopim not necessary
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=68">68</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>SIM backend for EDS
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=69">69</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>buglog@lists.openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Speed up System Initialization
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=70">70</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>buglog@lists.openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Minimize Services started on Bootup
</td>
</tr>
<tr class="bz_enhancement bz_P2 ">
<td>
<a href="show_bug.cgi?id=71">71</a>
</td>
<td><nobr>enh</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>gordon_hsu@fic-sh.com.cn</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>make a short vibration pulse once u-boot is starting
</td>
</tr>
<tr class="bz_wishlist bz_P2 ">
<td>
<a href="show_bug.cgi?id=72">72</a>
</td>
<td><nobr>wis</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>gordon_hsu@fic-sh.com.cn</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>Add on-screen boot menu
</td>
</tr>
<tr class="bz_blocker bz_P2 ">
<td>
<a href="show_bug.cgi?id=73">73</a>
</td>
<td><nobr>blo</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>test and verify battery charger control (pcf50606)
</td>
</tr>
<tr class="bz_blocker bz_P1 ">
<td>
<a href="show_bug.cgi?id=74">74</a>
</td>
<td><nobr>blo</nobr>
</td>
<td><nobr>P1</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>WONT</nobr>
</td>
<td>stub audio driver to power up amp and route audio through...
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=75">75</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>PWM code for display brightness control
</td>
</tr>
<tr class="bz_enhancement bz_P2 ">
<td>
<a href="show_bug.cgi?id=76">76</a>
</td>
<td><nobr>enh</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>teddy@fic-sh.com.cn</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Implement PWM control for vibrator
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=77">77</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>songcw@fic-sh.com.cn</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Finish, test and verify agpsd implementation
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=78">78</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>Implement and test ASoC platform driver
</td>
</tr>
<tr class="bz_blocker bz_P1 ">
<td>
<a href="show_bug.cgi?id=79">79</a>
</td>
<td><nobr>blo</nobr>
</td>
<td><nobr>P1</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>werner@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>suspend/resume to RAM support
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=80">80</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>WONT</nobr>
</td>
<td>Add sysfs entry for PMU wakeup reason
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=81">81</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>werner@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Decide how PMU RTC alarm interrupt is signalled to userspace
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=82">82</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>implement and test cpufreq interface to S3C2410 PLL / SLO...
</td>
</tr>
<tr class="bz_enhancement bz_P2 ">
<td>
<a href="show_bug.cgi?id=83">83</a>
</td>
<td><nobr>enh</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>teddy@fic-sh.com.cn</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>evaluate process and I/O schedulers
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=84">84</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>enable voluntary preemption
</td>
</tr>
<tr class="bz_minor bz_P2 ">
<td>
<a href="show_bug.cgi?id=85">85</a>
</td>
<td><nobr>min</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>test NO_IDLE_HZ / tickless idle
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=86">86</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>APM emulation for battery / charger / charging and possib...
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=87">87</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>define and implement how headphone jack routing/signallin...
</td>
</tr>
<tr class="bz_minor bz_P2 ">
<td>
<a href="show_bug.cgi?id=88">88</a>
</td>
<td><nobr>min</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>teddy@fic-sh.com.cn</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>use and test PMU watchdog driver
</td>
</tr>
<tr class="bz_critical bz_P2 ">
<td>
<a href="show_bug.cgi?id=89">89</a>
</td>
<td><nobr>cri</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>teddy@fic-sh.com.cn</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>determine correct gamma calibration values and put them i...
</td>
</tr>
<tr class="bz_critical bz_P1 ">
<td>
<a href="show_bug.cgi?id=90">90</a>
</td>
<td><nobr>cri</nobr>
</td>
<td><nobr>P1</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>GSM TS07.10 multiplex missing
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=91">91</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>DUPL</nobr>
</td>
<td>debug sd card timeout problems
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=92">92</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>test multiple microSD card vendors for compatibility with...
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=93">93</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>test 4GB microSD card compatibility
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=94">94</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>tonyguan@fic-sh.com.cn</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>+ symbol support
</td>
</tr>
<tr class="bz_minor bz_P2 ">
<td>
<a href="show_bug.cgi?id=95">95</a>
</td>
<td><nobr>min</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>sean_chiang@fic.com.tw</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>verify charger current and battery temperature reading co...
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=96">96</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>make sure PMU alarm (set via rtc interface) is persistent
</td>
</tr>
<tr class="bz_blocker bz_P2 ">
<td>
<a href="show_bug.cgi?id=97">97</a>
</td>
<td><nobr>blo</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>remove static mtd partition table, use u-boot created dyn...
</td>
</tr>
<tr class="bz_enhancement bz_P2 ">
<td>
<a href="show_bug.cgi?id=98">98</a>
</td>
<td><nobr>enh</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>how to do touch panel calibration in factory and store va...
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=99">99</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>DUPL</nobr>
</td>
<td>Implement SMS support
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=100">100</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Implement Cell Broadcast support
</td>
</tr>
</table>
<table class="bz_buglist" cellspacing="0" cellpadding="4" width="100%">
<colgroup>
<col class="bz_id_column">
<col class="bz_severity_column">
<col class="bz_priority_column">
<col class="bz_platform_column">
<col class="bz_owner_column">
<col class="bz_status_column">
<col class="bz_resolution_column">
<col class="bz_summary_column">
</colgroup>
<tr align="left">
<th colspan="1">
<a href="buglist.cgi?short_desc_type=allwordssubstr&short_desc=&long_desc_type=allwordssubstr&long_desc=&bug_file_loc_type=allwordssubstr&bug_file_loc=&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&emailassigned_to1=1&emailtype1=substring&email1=&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailtype2=substring&email2=&bugidtype=include&bug_id=&votes=&changedin=&chfieldfrom=&chfieldto=Now&chfieldvalue=&field0-0-0=noop&type0-0-0=noop&value0-0-0=&order=bugs.bug_id">ID</a>
</th>
<th colspan="1">
<a href="buglist.cgi?short_desc_type=allwordssubstr&short_desc=&long_desc_type=allwordssubstr&long_desc=&bug_file_loc_type=allwordssubstr&bug_file_loc=&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&emailassigned_to1=1&emailtype1=substring&email1=&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailtype2=substring&email2=&bugidtype=include&bug_id=&votes=&changedin=&chfieldfrom=&chfieldto=Now&chfieldvalue=&field0-0-0=noop&type0-0-0=noop&value0-0-0=&order=bugs.bug_severity,bugs.bug_id">Sev</a>
</th><th colspan="1">
<a href="buglist.cgi?short_desc_type=allwordssubstr&short_desc=&long_desc_type=allwordssubstr&long_desc=&bug_file_loc_type=allwordssubstr&bug_file_loc=&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&emailassigned_to1=1&emailtype1=substring&email1=&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailtype2=substring&email2=&bugidtype=include&bug_id=&votes=&changedin=&chfieldfrom=&chfieldto=Now&chfieldvalue=&field0-0-0=noop&type0-0-0=noop&value0-0-0=&order=bugs.priority,bugs.bug_id">Pri</a>
</th><th colspan="1">
<a href="buglist.cgi?short_desc_type=allwordssubstr&short_desc=&long_desc_type=allwordssubstr&long_desc=&bug_file_loc_type=allwordssubstr&bug_file_loc=&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&emailassigned_to1=1&emailtype1=substring&email1=&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailtype2=substring&email2=&bugidtype=include&bug_id=&votes=&changedin=&chfieldfrom=&chfieldto=Now&chfieldvalue=&field0-0-0=noop&type0-0-0=noop&value0-0-0=&order=bugs.rep_platform,bugs.bug_id">Plt</a>
</th><th colspan="1">
<a href="buglist.cgi?short_desc_type=allwordssubstr&short_desc=&long_desc_type=allwordssubstr&long_desc=&bug_file_loc_type=allwordssubstr&bug_file_loc=&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&emailassigned_to1=1&emailtype1=substring&email1=&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailtype2=substring&email2=&bugidtype=include&bug_id=&votes=&changedin=&chfieldfrom=&chfieldto=Now&chfieldvalue=&field0-0-0=noop&type0-0-0=noop&value0-0-0=&order=map_assigned_to.login_name,bugs.bug_id">Owner</a>
</th><th colspan="1">
<a href="buglist.cgi?short_desc_type=allwordssubstr&short_desc=&long_desc_type=allwordssubstr&long_desc=&bug_file_loc_type=allwordssubstr&bug_file_loc=&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&emailassigned_to1=1&emailtype1=substring&email1=&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailtype2=substring&email2=&bugidtype=include&bug_id=&votes=&changedin=&chfieldfrom=&chfieldto=Now&chfieldvalue=&field0-0-0=noop&type0-0-0=noop&value0-0-0=&order=bugs.bug_status,bugs.bug_id">State</a>
</th><th colspan="1">
<a href="buglist.cgi?short_desc_type=allwordssubstr&short_desc=&long_desc_type=allwordssubstr&long_desc=&bug_file_loc_type=allwordssubstr&bug_file_loc=&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&emailassigned_to1=1&emailtype1=substring&email1=&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailtype2=substring&email2=&bugidtype=include&bug_id=&votes=&changedin=&chfieldfrom=&chfieldto=Now&chfieldvalue=&field0-0-0=noop&type0-0-0=noop&value0-0-0=&order=bugs.resolution,bugs.bug_id">Result</a>
</th><th colspan="1">
<a href="buglist.cgi?short_desc_type=allwordssubstr&short_desc=&long_desc_type=allwordssubstr&long_desc=&bug_file_loc_type=allwordssubstr&bug_file_loc=&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&emailassigned_to1=1&emailtype1=substring&email1=&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailtype2=substring&email2=&bugidtype=include&bug_id=&votes=&changedin=&chfieldfrom=&chfieldto=Now&chfieldvalue=&field0-0-0=noop&type0-0-0=noop&value0-0-0=&order=bugs.short_desc,bugs.bug_id">Summary</a>
</th>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=101">101</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Implement GPRS setup/teardown support
</td>
</tr>
<tr class="bz_critical bz_P2 ">
<td>
<a href="show_bug.cgi?id=102">102</a>
</td>
<td><nobr>cri</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>ASSI</nobr>
</td>
<td><nobr></nobr>
</td>
<td>SIM phonebook access
</td>
</tr>
<tr class="bz_blocker bz_P1 ">
<td>
<a href="show_bug.cgi?id=103">103</a>
</td>
<td><nobr>blo</nobr>
</td>
<td><nobr>P1</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>power-up/power-down GSM Modem
</td>
</tr>
<tr class="bz_critical bz_P2 ">
<td>
<a href="show_bug.cgi?id=104">104</a>
</td>
<td><nobr>cri</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>tonyguan@fic-sh.com.cn</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>LATE</nobr>
</td>
<td>Volume control
</td>
</tr>
<tr class="bz_enhancement bz_P2 ">
<td>
<a href="show_bug.cgi?id=105">105</a>
</td>
<td><nobr>enh</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>DUPL</nobr>
</td>
<td>add passthrough mode
</td>
</tr>
<tr class="bz_blocker bz_P2 ">
<td>
<a href="show_bug.cgi?id=106">106</a>
</td>
<td><nobr>blo</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>tonyguan@fic-sh.com.cn</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>LATE</nobr>
</td>
<td>Emergency Call Support
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=107">107</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>obtain list of operators / control operator selection
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=108">108</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>tonyguan@fic-sh.com.cn</nobr>
</td>
<td><nobr>REOP</nobr>
</td>
<td><nobr></nobr>
</td>
<td>allow query of manufacturer/model/revision/imei
</td>
</tr>
<tr class="bz_enhancement bz_P2 ">
<td>
<a href="show_bug.cgi?id=109">109</a>
</td>
<td><nobr>enh</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>add dbus interface, like recent upstream gpsd
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=110">110</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>look into gps / agps integration
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=111">111</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>integrate agpsd in our system power management.
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=112">112</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>buglog@lists.openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>How to deliver kernel-level alarm to destination app
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=113">113</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>marcel@holtmann.org</nobr>
</td>
<td><nobr>ASSI</nobr>
</td>
<td><nobr></nobr>
</td>
<td>bluetooth headset support
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=114">114</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>buglog@lists.openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Who is managing wakeup times?
</td>
</tr>
<tr class="bz_enhancement bz_P2 ">
<td>
<a href="show_bug.cgi?id=115">115</a>
</td>
<td><nobr>enh</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>marcel@holtmann.org</nobr>
</td>
<td><nobr>ASSI</nobr>
</td>
<td><nobr></nobr>
</td>
<td>A2DP / alsa integration
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=116">116</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>marcel@holtmann.org</nobr>
</td>
<td><nobr>ASSI</nobr>
</td>
<td><nobr></nobr>
</td>
<td>bluetooth HID support (host)
</td>
</tr>
<tr class="bz_minor bz_P2 ">
<td>
<a href="show_bug.cgi?id=117">117</a>
</td>
<td><nobr>min</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>marcel@holtmann.org</nobr>
</td>
<td><nobr>ASSI</nobr>
</td>
<td><nobr></nobr>
</td>
<td>bluetooth HID support (device)
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=118">118</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>marcel@holtmann.org</nobr>
</td>
<td><nobr>ASSI</nobr>
</td>
<td><nobr></nobr>
</td>
<td>bluetooth networking support
</td>
</tr>
<tr class="bz_critical bz_P3 ">
<td>
<a href="show_bug.cgi?id=119">119</a>
</td>
<td><nobr>cri</nobr>
</td>
<td><nobr>P3</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>merge openmoko-taskmanager into openmoko-footer
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=120">120</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>marcel@holtmann.org</nobr>
</td>
<td><nobr>ASSI</nobr>
</td>
<td><nobr></nobr>
</td>
<td>bluetooth OBEX
</td>
</tr>
<tr class="bz_critical bz_P3 ">
<td>
<a href="show_bug.cgi?id=121">121</a>
</td>
<td><nobr>cri</nobr>
</td>
<td><nobr>P3</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>merge openmoko-mainmenu into openmoko-mainmenu (panel)
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=122">122</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>rename openmoko-history to openmoko-taskmanager
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=123">123</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>DUPL</nobr>
</td>
<td>rename openmoko-history to openmoko-taskmanager
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=124">124</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>modem volume control on connection
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=125">125</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>davewu01@seed.net.tw</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>Finger UI is not usable on 2.8" screen
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=126">126</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>sunzhiyong@fic-sh.com.cn</nobr>
</td>
<td><nobr>ASSI</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Remove back functionality from Main Menu
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=127">127</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>davewu01@seed.net.tw</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>Power On / Off Images needed
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=128">128</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>cj_steven@fic-sh.com.cn</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Tap and hold on panel icon doesn't change to Today applic...
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=129">129</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>ken_zhao@fic-sh.com.cn</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Create / Find better system fonts
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=130">130</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>GTK Popup menus size incorrectly
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=131">131</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>davewu01@seed.net.tw</nobr>
</td>
<td><nobr>ASSI</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Move Search Open / Close buttons into same location
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=132">132</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>sunzhiyong@fic-sh.com.cn</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>Task Manager is not quick to use
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=133">133</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>davewu01@seed.net.tw</nobr>
</td>
<td><nobr>ASSI</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Designer image layouts should have both 4 corners and ful...
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=134">134</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Stylus applications need close function
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=135">135</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Finger applications need close functionality
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=136">136</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>buglog@lists.openmoko.org</nobr>
</td>
<td><nobr>ASSI</nobr>
</td>
<td><nobr></nobr>
</td>
<td>application manager doesn't build
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=137">137</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>buglog@lists.openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>submit patch against ipkg upstream
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=138">138</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>sunzhiyong@fic-sh.com.cn</nobr>
</td>
<td><nobr>REOP</nobr>
</td>
<td><nobr></nobr>
</td>
<td>submit patch against matchbox-window-manager upstream
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=139">139</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>GSM API
</td>
</tr>
<tr class="bz_enhancement bz_P2 ">
<td>
<a href="show_bug.cgi?id=140">140</a>
</td>
<td><nobr>enh</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>stefan@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>add network-enabled fbgrab from openEZX to openmoko-devel...
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=141">141</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>buglog@lists.openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Need support for device under Windows and OS X
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=142">142</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>werner@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>sjf2410-linux cleanup / help message / NAND read
</td>
</tr>
<tr class="bz_enhancement bz_P2 ">
<td>
<a href="show_bug.cgi?id=143">143</a>
</td>
<td><nobr>enh</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>buglog@lists.openmoko.org</nobr>
</td>
<td><nobr>REOP</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Implement NAND write/read support in OpenOCD
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=144">144</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>when phone is hard-rebooted, Xfbdev complains about /tmp/...
</td>
</tr>
<tr class="bz_blocker bz_P2 ">
<td>
<a href="show_bug.cgi?id=145">145</a>
</td>
<td><nobr>blo</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>WONT</nobr>
</td>
<td>battery is not automatically charging
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=146">146</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>sjf2410-linux does not contain latest svn code
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=147">147</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>WONT</nobr>
</td>
<td>openmoko-panel-applet could not be resized
</td>
</tr>
<tr class="bz_blocker bz_P1 ">
<td>
<a href="show_bug.cgi?id=148">148</a>
</td>
<td><nobr>blo</nobr>
</td>
<td><nobr>P1</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>gsmd not talking to TI modem on GTA01Bv2
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=149">149</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>ASSI</nobr>
</td>
<td><nobr></nobr>
</td>
<td>lm4857 not i2c address compliant
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=150">150</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>graeme.gregory@wolfsonmicro...</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>INVA</nobr>
</td>
<td>ASoC patch doesn't compile
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=151">151</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>cj_steven@fic-sh.com.cn</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Does mainmenu need libmatchbox or not?
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=152">152</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>cj_steven@fic-sh.com.cn</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>VFOLDERDIR is hardcoded
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=153">153</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>sunzhiyong@fic-sh.com.cn</nobr>
</td>
<td><nobr>ASSI</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Rationale for copying GtkIconView instead of deriving?
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=154">154</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>sunzhiyong@fic-sh.com.cn</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>mainmenu crashes when clicking wheel the 2nd time
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=155">155</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>sunzhiyong@fic-sh.com.cn</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>How to get back one level if you are in a subdirectory?
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=156">156</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>sunzhiyong@fic-sh.com.cn</nobr>
</td>
<td><nobr>ASSI</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Where is mainmenu going to look for applications?
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=157">157</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>davewu01@seed.net.tw</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>DUPL</nobr>
</td>
<td>The sizes of each keys are too small
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=158">158</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>buglog@lists.openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>musicplayer crashes
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=159">159</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>sunzhiyong@fic-sh.com.cn</nobr>
</td>
<td><nobr>ASSI</nobr>
</td>
<td><nobr></nobr>
</td>
<td>display thumbnails of actual applications
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=160">160</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>sunzhiyong@fic-sh.com.cn</nobr>
</td>
<td><nobr>ASSI</nobr>
</td>
<td><nobr></nobr>
</td>
<td>display thumbnails in 3x3 grid
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=161">161</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>davewu01@seed.net.tw</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>Docked Keypad is too small
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=162">162</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>REMI</nobr>
</td>
<td>libmutil0_svn.bb setup misses libltdl creation
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=163">163</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Audio Profile Management
</td>
</tr>
<tr class="bz_major bz_P1 ">
<td>
<a href="show_bug.cgi?id=164">164</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P1</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>werner@openmoko.org</nobr>
</td>
<td><nobr>ASSI</nobr>
</td>
<td><nobr></nobr>
</td>
<td>improve non-SanDisk microSD support in u-boot
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=165">165</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>buglog@lists.openmoko.org</nobr>
</td>
<td><nobr>ASSI</nobr>
</td>
<td><nobr></nobr>
</td>
<td>openmoko-simplemediaplayer doesn't build in OE
</td>
</tr>
<tr class="bz_minor bz_P2 ">
<td>
<a href="show_bug.cgi?id=166">166</a>
</td>
<td><nobr>min</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>u-boot cdc_acm hot un-plug/replug hang
</td>
</tr>
<tr class="bz_enhancement bz_P2 ">
<td>
<a href="show_bug.cgi?id=167">167</a>
</td>
<td><nobr>enh</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>stefan@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>add LCM QVGA switching support
</td>
</tr>
<tr class="bz_minor bz_P2 ">
<td>
<a href="show_bug.cgi?id=168">168</a>
</td>
<td><nobr>min</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>buglog@lists.openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>usb0 is not automatically configured
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=169">169</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>gdb currently broken (gdb-6.4-r0)
</td>
</tr>
<tr class="bz_minor bz_P2 ">
<td>
<a href="show_bug.cgi?id=170">170</a>
</td>
<td><nobr>min</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>usbtty: sometimes bogus characters arrive
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=171">171</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>agpsd source code and bitbake rules not in our svn
</td>
</tr>
<tr class="bz_blocker bz_P1 ">
<td>
<a href="show_bug.cgi?id=172">172</a>
</td>
<td><nobr>blo</nobr>
</td>
<td><nobr>P1</nobr>
</td>
<td><nobr>Oth</nobr>
</td>
<td><nobr>tonyguan@fic-sh.com.cn</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>missing openmoko-dialer-window-pin.o breaks build
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=173">173</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Oth</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>no NAND partitions due to ID mismatch if using defaults
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=174">174</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Oth</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>defconfig-om-gta01 could use updating
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=175">175</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>MOKO_FINGER_WINDOW has to show_all and then hide to initi...
</td>
</tr>
<tr class="bz_critical bz_P2 ">
<td>
<a href="show_bug.cgi?id=176">176</a>
</td>
<td><nobr>cri</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>libgsmd need a mechanism to avoid dead waiting.
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=177">177</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>libmokoui widget functions should return GtkWidget
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=178">178</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>werner@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>u-boot 'factory reset' option
</td>
</tr>
<tr class="bz_minor bz_P2 ">
<td>
<a href="show_bug.cgi?id=179">179</a>
</td>
<td><nobr>min</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>werner@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>Implement u-boot power-off timer
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=180">180</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Mac</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>uboot build broken for EABI
</td>
</tr>
<tr class="bz_wishlist bz_P2 ">
<td>
<a href="show_bug.cgi?id=181">181</a>
</td>
<td><nobr>wis</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>buglog@lists.openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Password Storage/Retrieval Application
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=182">182</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Mac</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>openmoko-panel-demo-simple hardcodes -Werror
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=183">183</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Mac</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>openmoko-simple-mediaplayer missing mkinstalldirs and has...
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=184">184</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Mac</nobr>
</td>
<td><nobr>cj_steven@fic-sh.com.cn</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>openmoko-mainmenu should link against libmb
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=185">185</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Mac</nobr>
</td>
<td><nobr>thomas@openedhand.com</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>openmoko-dates lacks intltool-update.in
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=186">186</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Mac</nobr>
</td>
<td><nobr>thomas@openedhand.com</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>Fingerbubbles take endless amount of ram and get OOMed
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=187">187</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Mac</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>src/target/OM-2007/README doesn't mention ipkg patch
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=188">188</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Mac</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>openmoko-panel-demo fails to build
</td>
</tr>
<tr class="bz_normal bz_P5 ">
<td>
<a href="show_bug.cgi?id=189">189</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P5</nobr>
</td>
<td><nobr>Mac</nobr>
</td>
<td><nobr>thomas@openedhand.com</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>openmoko-dates tries to include non-existent header
</td>
</tr>
<tr class="bz_normal bz_P5 ">
<td>
<a href="show_bug.cgi?id=190">190</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P5</nobr>
</td>
<td><nobr>Mac</nobr>
</td>
<td><nobr>thomas@openedhand.com</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>No rule to build dates.desktop
</td>
</tr>
<tr class="bz_enhancement bz_P2 ">
<td>
<a href="show_bug.cgi?id=191">191</a>
</td>
<td><nobr>enh</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>sean_chiang@fic.com.tw</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>investigate if we can set CPU voltage to 1.8V on 200MHz o...
</td>
</tr>
<tr class="bz_enhancement bz_P2 ">
<td>
<a href="show_bug.cgi?id=192">192</a>
</td>
<td><nobr>enh</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Mac</nobr>
</td>
<td><nobr>buglog@lists.openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Graphic bootsplash during userspace sysinit
</td>
</tr>
<tr class="bz_enhancement bz_P3 ">
<td>
<a href="show_bug.cgi?id=193">193</a>
</td>
<td><nobr>enh</nobr>
</td>
<td><nobr>P3</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>sean_chiang@fic.com.tw</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Information about current charging status when AC is online
</td>
</tr>
<tr class="bz_minor bz_P2 ">
<td>
<a href="show_bug.cgi?id=194">194</a>
</td>
<td><nobr>min</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>stefan@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>s3c2410fb 8bit mode corrupt
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=195">195</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>stefan@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>passthrough mode (Directly use GSM Modem from PC)
</td>
</tr>
<tr class="bz_enhancement bz_P2 ">
<td>
<a href="show_bug.cgi?id=196">196</a>
</td>
<td><nobr>enh</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Mac</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Merge back fixes to openmoko recipes from OE
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=197">197</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Make theme suitable for qvga screens.
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=198">198</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>Please enable CONFIG_TUN as a module in defconfig-om-gta01
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=199">199</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>sean_mosko@fic.com.tw</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>We need freely licensed ringtones
</td>
</tr>
<tr class="bz_minor bz_P2 ">
<td>
<a href="show_bug.cgi?id=200">200</a>
</td>
<td><nobr>min</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>PARALLEL_MAKE seems to not work
</td>
</tr>
</table>
<table class="bz_buglist" cellspacing="0" cellpadding="4" width="100%">
<colgroup>
<col class="bz_id_column">
<col class="bz_severity_column">
<col class="bz_priority_column">
<col class="bz_platform_column">
<col class="bz_owner_column">
<col class="bz_status_column">
<col class="bz_resolution_column">
<col class="bz_summary_column">
</colgroup>
<tr align="left">
<th colspan="1">
<a href="buglist.cgi?short_desc_type=allwordssubstr&short_desc=&long_desc_type=allwordssubstr&long_desc=&bug_file_loc_type=allwordssubstr&bug_file_loc=&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&emailassigned_to1=1&emailtype1=substring&email1=&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailtype2=substring&email2=&bugidtype=include&bug_id=&votes=&changedin=&chfieldfrom=&chfieldto=Now&chfieldvalue=&field0-0-0=noop&type0-0-0=noop&value0-0-0=&order=bugs.bug_id">ID</a>
</th>
<th colspan="1">
<a href="buglist.cgi?short_desc_type=allwordssubstr&short_desc=&long_desc_type=allwordssubstr&long_desc=&bug_file_loc_type=allwordssubstr&bug_file_loc=&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&emailassigned_to1=1&emailtype1=substring&email1=&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailtype2=substring&email2=&bugidtype=include&bug_id=&votes=&changedin=&chfieldfrom=&chfieldto=Now&chfieldvalue=&field0-0-0=noop&type0-0-0=noop&value0-0-0=&order=bugs.bug_severity,bugs.bug_id">Sev</a>
</th><th colspan="1">
<a href="buglist.cgi?short_desc_type=allwordssubstr&short_desc=&long_desc_type=allwordssubstr&long_desc=&bug_file_loc_type=allwordssubstr&bug_file_loc=&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&emailassigned_to1=1&emailtype1=substring&email1=&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailtype2=substring&email2=&bugidtype=include&bug_id=&votes=&changedin=&chfieldfrom=&chfieldto=Now&chfieldvalue=&field0-0-0=noop&type0-0-0=noop&value0-0-0=&order=bugs.priority,bugs.bug_id">Pri</a>
</th><th colspan="1">
<a href="buglist.cgi?short_desc_type=allwordssubstr&short_desc=&long_desc_type=allwordssubstr&long_desc=&bug_file_loc_type=allwordssubstr&bug_file_loc=&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&emailassigned_to1=1&emailtype1=substring&email1=&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailtype2=substring&email2=&bugidtype=include&bug_id=&votes=&changedin=&chfieldfrom=&chfieldto=Now&chfieldvalue=&field0-0-0=noop&type0-0-0=noop&value0-0-0=&order=bugs.rep_platform,bugs.bug_id">Plt</a>
</th><th colspan="1">
<a href="buglist.cgi?short_desc_type=allwordssubstr&short_desc=&long_desc_type=allwordssubstr&long_desc=&bug_file_loc_type=allwordssubstr&bug_file_loc=&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&emailassigned_to1=1&emailtype1=substring&email1=&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailtype2=substring&email2=&bugidtype=include&bug_id=&votes=&changedin=&chfieldfrom=&chfieldto=Now&chfieldvalue=&field0-0-0=noop&type0-0-0=noop&value0-0-0=&order=map_assigned_to.login_name,bugs.bug_id">Owner</a>
</th><th colspan="1">
<a href="buglist.cgi?short_desc_type=allwordssubstr&short_desc=&long_desc_type=allwordssubstr&long_desc=&bug_file_loc_type=allwordssubstr&bug_file_loc=&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&emailassigned_to1=1&emailtype1=substring&email1=&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailtype2=substring&email2=&bugidtype=include&bug_id=&votes=&changedin=&chfieldfrom=&chfieldto=Now&chfieldvalue=&field0-0-0=noop&type0-0-0=noop&value0-0-0=&order=bugs.bug_status,bugs.bug_id">State</a>
</th><th colspan="1">
<a href="buglist.cgi?short_desc_type=allwordssubstr&short_desc=&long_desc_type=allwordssubstr&long_desc=&bug_file_loc_type=allwordssubstr&bug_file_loc=&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&emailassigned_to1=1&emailtype1=substring&email1=&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailtype2=substring&email2=&bugidtype=include&bug_id=&votes=&changedin=&chfieldfrom=&chfieldto=Now&chfieldvalue=&field0-0-0=noop&type0-0-0=noop&value0-0-0=&order=bugs.resolution,bugs.bug_id">Result</a>
</th><th colspan="1">
<a href="buglist.cgi?short_desc_type=allwordssubstr&short_desc=&long_desc_type=allwordssubstr&long_desc=&bug_file_loc_type=allwordssubstr&bug_file_loc=&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&emailassigned_to1=1&emailtype1=substring&email1=&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailtype2=substring&email2=&bugidtype=include&bug_id=&votes=&changedin=&chfieldfrom=&chfieldto=Now&chfieldvalue=&field0-0-0=noop&type0-0-0=noop&value0-0-0=&order=bugs.short_desc,bugs.bug_id">Summary</a>
</th>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=201">201</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Use TEXT_BASE 0x37f80000 in u-boot on GTA01Bv2 and higher
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=202">202</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>werner@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>Start using NAND hardware ECC support
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=203">203</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>fix the web site: http://openmoko.com/
</td>
</tr>
<tr class="bz_minor bz_P2 ">
<td>
<a href="show_bug.cgi?id=204">204</a>
</td>
<td><nobr>min</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>buglog@lists.openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Fatal error in Special:Newimages
</td>
</tr>
<tr class="bz_minor bz_P2 ">
<td>
<a href="show_bug.cgi?id=205">205</a>
</td>
<td><nobr>min</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>add code to u-boot to query hardware revision and serial ...
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=206">206</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Disallow setting of overvoltage via pcf50606 kernel driver
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=207">207</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>DFU mode should only be enabled when in "911 key" mode
</td>
</tr>
<tr class="bz_minor bz_P2 ">
<td>
<a href="show_bug.cgi?id=208">208</a>
</td>
<td><nobr>min</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>u-boot DFU upload broken
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=209">209</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>u-boot DFU needs to block console access while in DFU mode
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=210">210</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Oth</nobr>
</td>
<td><nobr>henryk@ploetzli.ch</nobr>
</td>
<td><nobr>ASSI</nobr>
</td>
<td><nobr></nobr>
</td>
<td>"now" causes frequent rebuilds and fills disks
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=211">211</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>buglog@lists.openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>sjf2410-linux-native.bb has do_deploy in the wrong location
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=212">212</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Oth</nobr>
</td>
<td><nobr>werner@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Charging seems completely broken
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=213">213</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Oth</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>openmoko-dates-0.1+svnnow fails certificate check
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=214">214</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>Add CVS_TARBALL_STASH for missing upstream sources
</td>
</tr>
<tr class="bz_blocker bz_P2 ">
<td>
<a href="show_bug.cgi?id=215">215</a>
</td>
<td><nobr>blo</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>fingerwheel crashes mainmenu when touching the black part
</td>
</tr>
<tr class="bz_blocker bz_P3 ">
<td>
<a href="show_bug.cgi?id=216">216</a>
</td>
<td><nobr>blo</nobr>
</td>
<td><nobr>P3</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>thomas@openedhand.com</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>DUPL</nobr>
</td>
<td>contacts crashes when trying to enter import widget
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=217">217</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>werner@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Implement NAND OTP area read/write as u-boot commands
</td>
</tr>
<tr class="bz_enhancement bz_P2 ">
<td>
<a href="show_bug.cgi?id=218">218</a>
</td>
<td><nobr>enh</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Distinguish stylus from finger via tslib
</td>
</tr>
<tr class="bz_blocker bz_P2 ">
<td>
<a href="show_bug.cgi?id=219">219</a>
</td>
<td><nobr>blo</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>tonyguan@fic-sh.com.cn</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>openmoko-dialer r1159 fails to compile
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=220">220</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>libgsmd_device.c is missing
</td>
</tr>
<tr class="bz_blocker bz_P2 ">
<td>
<a href="show_bug.cgi?id=221">221</a>
</td>
<td><nobr>blo</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Mac</nobr>
</td>
<td><nobr>thomas@openedhand.com</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>Can't add new contacts via the gui
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=222">222</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Mac</nobr>
</td>
<td><nobr>thomas@openedhand.com</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>WORK</nobr>
</td>
<td>Can't add new events
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=223">223</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Mac</nobr>
</td>
<td><nobr>thomas@openedhand.com</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>weekview only displays half the week
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=224">224</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>call to uboot-mkimage requires ${STAGING_BINDIR} prefix
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=225">225</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>Fix ordering of do_deploy in uboot to be compatible with ...
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=226">226</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>buglog@lists.openmoko.org</nobr>
</td>
<td><nobr>REOP</nobr>
</td>
<td><nobr></nobr>
</td>
<td>dfu-util-native do_deploy tries to install from wrong sou...
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=227">227</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Add openmoko-mirrors.bbclass and enable use of it
</td>
</tr>
<tr class="bz_blocker bz_P2 ">
<td>
<a href="show_bug.cgi?id=228">228</a>
</td>
<td><nobr>blo</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>openmoko applications(contacts, appmanager ...) easily c...
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=229">229</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>davewu01@seed.net.tw</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>outgoing call/incoming call/talking status should be more...
</td>
</tr>
<tr class="bz_trivial bz_P2 ">
<td>
<a href="show_bug.cgi?id=230">230</a>
</td>
<td><nobr>tri</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>Use the toolchain specified in $CROSS_COMPILE in u-boot.
</td>
</tr>
<tr class="bz_minor bz_P2 ">
<td>
<a href="show_bug.cgi?id=231">231</a>
</td>
<td><nobr>min</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>werner@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>switch display backlight GPIO to "output, off" when suspe...
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=232">232</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Oth</nobr>
</td>
<td><nobr>buglog@lists.openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>n-plicate buglog mails
</td>
</tr>
<tr class="bz_critical bz_P2 ">
<td>
<a href="show_bug.cgi?id=233">233</a>
</td>
<td><nobr>cri</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>werner@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>power-off timer should be halted in DFU mode
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=234">234</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>werner@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>check for bad blocks in first _and_ second page of each b...
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=235">235</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>Deploy openocd-native, not openocd, and make openocd-nati...
</td>
</tr>
<tr class="bz_critical bz_P2 ">
<td>
<a href="show_bug.cgi?id=236">236</a>
</td>
<td><nobr>cri</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Close moko_dialog_window several times, moko_stylus_demo ...
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=237">237</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>Fix remaining https urls in bitbake recipes.
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=238">238</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Mac</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>LATE</nobr>
</td>
<td>manual test bug
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=239">239</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>LATE</nobr>
</td>
<td>foo
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=240">240</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>INVA</nobr>
</td>
<td>broken-1.0-r0-do_fetch
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=241">241</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>CLOS</nobr>
</td>
<td><nobr>LATE</nobr>
</td>
<td>broken-1.0-r0-do_fetch
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=242">242</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>INVA</nobr>
</td>
<td>broken-1.0-r0-do_compile
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=243">243</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>INVA</nobr>
</td>
<td>broken-1.0-r0-do_configure
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=244">244</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>I can't build Xorg7.1 from MokoMakefile
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=245">245</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>Neo crashes when writing large amounts of data to SD
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=246">246</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>sean_chiang@fic.com.tw</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Debug board needs to be recognized by mainline linux kernel.
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=247">247</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>thomas@openedhand.com</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>openmoko-dates svn rev. 335 does no longer build
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=248">248</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>buglog@lists.openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Buttons disappear under zoom
</td>
</tr>
<tr class="bz_enhancement bz_P2 ">
<td>
<a href="show_bug.cgi?id=249">249</a>
</td>
<td><nobr>enh</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>add command to print gsmd version number
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=250">250</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>INVA</nobr>
</td>
<td>broken-1.0-r0-do_compile
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=251">251</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>INVA</nobr>
</td>
<td>broken-1.0-r0-do_compile
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=252">252</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>REOP</nobr>
</td>
<td><nobr></nobr>
</td>
<td>openmoko-devel-image-1.0-r0-do_rootfs
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=253">253</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Mount /tmp as tmpfs
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=254">254</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Oth</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>bug with "patch" on arklinux 2006.1??
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=255">255</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>tony_tu@fiwin.com.tw</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>battery voltage scale is not correct
</td>
</tr>
<tr class="bz_critical bz_P2 ">
<td>
<a href="show_bug.cgi?id=256">256</a>
</td>
<td><nobr>cri</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>sean_chiang@fic.com.tw</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>GSM Modem doesn't seem to work on some devices
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=257">257</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Oth</nobr>
</td>
<td><nobr>sean_chiang@fic.com.tw</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>AUX button sticking
</td>
</tr>
<tr class="bz_major bz_P2 ">
<td>
<a href="show_bug.cgi?id=258">258</a>
</td>
<td><nobr>maj</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>All</nobr>
</td>
<td><nobr>cj_steven@fic-sh.com.cn</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Main Menu needs to have Single Instance functionality
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=259">259</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>stefan@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>implement 500mA charging in u-boot
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=260">260</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>stefan@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>implement 100mA charging in Linux
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=261">261</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>stefan@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Implement 500mA charging using wall-outlet charger
</td>
</tr>
<tr class="bz_enhancement bz_P2 ">
<td>
<a href="show_bug.cgi?id=262">262</a>
</td>
<td><nobr>enh</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>buglog@lists.openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>Indicate different charging mode in battery applet
</td>
</tr>
<tr class="bz_blocker bz_P2 ">
<td>
<a href="show_bug.cgi?id=263">263</a>
</td>
<td><nobr>blo</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>gsmd doesn't receive AT reply from the modem properly.
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=264">264</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>package libelf-0.8.6-r0: task do_populate_sysroot: failed
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=265">265</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>MokoMakefile: perl-native fix
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=266">266</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>ftdi-eeprom-native missing confuse-native dependency
</td>
</tr>
<tr class="bz_enhancement bz_P4 ">
<td>
<a href="show_bug.cgi?id=267">267</a>
</td>
<td><nobr>enh</nobr>
</td>
<td><nobr>P4</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>buglog@lists.openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>internal function duplicates strstr(3)
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=268">268</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>buglog@lists.openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>openmoko-today crashes when one of the buttons is pressed
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=269">269</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>openmoko-contacts-0.1+svnnow-r3_0_200703151745-do_unpack
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=270">270</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>does our xserver need security updates?
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=271">271</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>laforge@openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>It would be nice if ppp was supported by kernel
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=272">272</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>openmoko-contacts-0.1+svnnow-r3_0_200703152250-do_unpack
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=273">273</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>openmoko-contacts-0.1+svnnow-r3_0_200703160254-do_unpack
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=274">274</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>openmoko-contacts-0.1+svnnow-r3_0_200703160321-do_unpack
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=275">275</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>openmoko-contacts-0.1+svnnow-r3_0_200703160350-do_unpack
</td>
</tr>
<tr class="bz_normal bz_P3 ">
<td>
<a href="show_bug.cgi?id=276">276</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P3</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>songcw@fic-sh.com.cn</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>The open file window is too ugly
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=277">277</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>openmoko-contacts-0.1+svnnow-r3_0_200703160712-do_unpack
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=278">278</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>mickey@vanille-media.de</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>openmoko-contacts-0.1+svnnow-r3_0_200703160805-do_unpack
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=279">279</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>buglog@lists.openmoko.org</nobr>
</td>
<td><nobr>RESO</nobr>
</td>
<td><nobr>FIXE</nobr>
</td>
<td>Appmanager crush when install packages
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=280">280</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>songcw@fic-sh.com.cn</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>openmoko-appmanager not refresh the packages list after r...
</td>
</tr>
<tr class="bz_normal bz_P3 ">
<td>
<a href="show_bug.cgi?id=281">281</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P3</nobr>
</td>
<td><nobr>PC</nobr>
</td>
<td><nobr>buglog@lists.openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>implicit declaration of function `strdup'
</td>
</tr>
<tr class="bz_normal bz_P2 ">
<td>
<a href="show_bug.cgi?id=282">282</a>
</td>
<td><nobr>nor</nobr>
</td>
<td><nobr>P2</nobr>
</td>
<td><nobr>Neo</nobr>
</td>
<td><nobr>buglog@lists.openmoko.org</nobr>
</td>
<td><nobr>NEW</nobr>
</td>
<td><nobr></nobr>
</td>
<td>microSD Problem
</td>
</tr>
</table>
282 bugs found.
<br>
<form method="post" action="long_list.cgi">
<input type="hidden" name="buglist" value="1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,256,257,258,259,260,261,262,263,264,265,266,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282">
<input type="submit" value="Long Format">
<a href="query.cgi">Query Page</a>
<a href="enter_bug.cgi">Enter New Bug</a>
<a href="colchange.cgi?short_desc_type=allwordssubstr&short_desc=&long_desc_type=allwordssubstr&long_desc=&bug_file_loc_type=allwordssubstr&bug_file_loc=&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&emailassigned_to1=1&emailtype1=substring&email1=&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailtype2=substring&email2=&bugidtype=include&bug_id=&votes=&changedin=&chfieldfrom=&chfieldto=Now&chfieldvalue=&field0-0-0=noop&type0-0-0=noop&value0-0-0=">Change Columns</a>
<a href="query.cgi?short_desc_type=allwordssubstr&short_desc=&long_desc_type=allwordssubstr&long_desc=&bug_file_loc_type=allwordssubstr&bug_file_loc=&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&emailassigned_to1=1&emailtype1=substring&email1=&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailtype2=substring&email2=&bugidtype=include&bug_id=&votes=&changedin=&chfieldfrom=&chfieldto=Now&chfieldvalue=&field0-0-0=noop&type0-0-0=noop&value0-0-0=">Edit this Query</a>
</form>
<!-- 1.0@bugzilla.org -->
</div>
<div class="footer">
<div class="group">This is <b>Bugzilla</b>: the Mozilla bug system. For more information about what Bugzilla is and what it can do, see <a href="http://www.bugzilla.org/">bugzilla.org</a>.</div>
<!-- 1.0@bugzilla.org -->
<form method="get" action="show_bug.cgi">
<div class="group">
<a href="enter_bug.cgi">New</a> | <a href="query.cgi">Query</a> | <input type="submit" value="Find"> bug # <input name="id" size="6"> | <a href="reports.cgi">Reports</a>
</div>
<div>
<a href="createaccount.cgi">New Account</a> | <a href="query.cgi?GoAheadAndLogIn=1">Log In</a>
</div>
</form>
</div>
</body>
</html>
"""
bugfinder =BugQueryExtractor()
bugfinder.feed(bugs_openmoko)
print bugfinder.result()
print len(bugfinder.result())
seen_numbers = {}
for (number,_) in bugfinder.result():
seen_numbers[number] = "Yes"
for i in range(1,283):
if not seen_numbers.has_key(str(i)):
print "Not seen %d" % i
| mit |
h3llrais3r/SickRage | tests/sickrage_tests/system/restart_tests.py | 11 | 2065 | # coding=utf-8
# This file is part of SickRage.
#
# URL: https://SickRage.GitHub.io
# Git: https://github.com/SickRage/SickRage.git
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
"""
Test restart
"""
from __future__ import print_function, unicode_literals
import os
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../lib')))
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..')))
import sickbeard
from sickbeard.event_queue import Events
from sickrage.system.Restart import Restart
import six
class RestartTests(unittest.TestCase):
    """Unit tests for sickrage.system.Restart.Restart.restart."""

    def test_restart(self):
        """Only the PID matching sickbeard.PID may trigger a restart."""
        sickbeard.PID = 123456
        sickbeard.events = Events(None)

        # Candidate PIDs (int and string forms) mapped to whether a
        # restart should be permitted for each.
        test_cases = {
            0: False,
            '0': False,
            123: False,
            '123': False,
            123456: True,
            '123456': True,
        }
        unicode_test_cases = {
            '0': False,
            '123': False,
            '123456': True,
        }

        for case_group in (test_cases, unicode_test_cases):
            for pid, expected in six.iteritems(case_group):
                self.assertEqual(Restart.restart(pid), expected)
if __name__ == '__main__':
    # Allow running the module directly: announce the file under test,
    # then run the suite with verbose output.
    print('=====> Testing {0}'.format(__file__))
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(unittest.TestLoader().loadTestsFromTestCase(RestartTests))
| gpl-3.0 |
heke123/chromium-crosswalk | tools/perf/benchmarks/benchmark_unittest.py | 9 | 3252 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""For all the benchmarks that set options, test that the options are valid."""
from collections import defaultdict
import os
import unittest
from core import perf_benchmark
from telemetry import benchmark as benchmark_module
from telemetry.core import discover
from telemetry.internal.browser import browser_options
from telemetry.testing import progress_reporter
def _GetPerfDir(*subdirs):
perf_dir = os.path.dirname(os.path.dirname(__file__))
return os.path.join(perf_dir, *subdirs)
def _GetAllPerfBenchmarks():
    """Discover every Benchmark subclass under tools/perf/benchmarks."""
    benchmark_classes = discover.DiscoverClasses(
        _GetPerfDir('benchmarks'), _GetPerfDir(), benchmark_module.Benchmark,
        index_by_class_name=True)
    return benchmark_classes.values()
def _BenchmarkOptionsTestGenerator(benchmark):
    """Build a test method verifying *benchmark*'s options parse cleanly.

    Invalid options raise benchmark.InvalidOptionsError while the parser
    is being configured, so constructing it acts as the assertion.
    """
    def testBenchmarkOptions(self):  # pylint: disable=unused-argument
        options = browser_options.BrowserFinderOptions()
        parser = options.CreateParser()
        benchmark.AddCommandLineArgs(parser)
        benchmark_module.AddCommandLineArgs(parser)
        benchmark.SetArgumentDefaults(parser)
    return testBenchmarkOptions
class TestNoBenchmarkNamesDuplication(unittest.TestCase):

    def runTest(self):
        """Every discovered benchmark must have a unique Name()."""
        names_to_benchmarks = defaultdict(list)
        for benchmark in _GetAllPerfBenchmarks():
            names_to_benchmarks[benchmark.Name()].append(benchmark)
        for n in names_to_benchmarks:
            self.assertEquals(1, len(names_to_benchmarks[n]),
                              'Multiple benchmarks with the same name %s are '
                              'found: %s' % (n, str(names_to_benchmarks[n])))
class TestNoOverrideCustomizeBrowserOptions(unittest.TestCase):

    def runTest(self):
        """Benchmarks must subclass PerfBenchmark and keep its browser setup."""
        for benchmark in _GetAllPerfBenchmarks():
            self.assertEquals(True,
                              issubclass(benchmark,
                                         perf_benchmark.PerfBenchmark),
                              'Benchmark %s needs to subclass from PerfBenchmark'
                              % benchmark.Name())
            self.assertEquals(
                benchmark.CustomizeBrowserOptions,
                perf_benchmark.PerfBenchmark.CustomizeBrowserOptions,
                'Benchmark %s should not override CustomizeBrowserOptions' %
                benchmark.Name())
def _AddBenchmarkOptionsTests(suite):
    """Append one options-parsing test per benchmark to *suite*.

    Using |index_by_class_name=True| during discovery allows returning
    multiple benchmarks from a single module.
    """
    for benchmark in _GetAllPerfBenchmarks():
        if not benchmark.options:
            # Benchmarks that define no options have nothing to validate.
            continue

        class BenchmarkOptionsTest(unittest.TestCase):
            pass

        setattr(BenchmarkOptionsTest, benchmark.Name(),
                _BenchmarkOptionsTestGenerator(benchmark))
        suite.addTest(BenchmarkOptionsTest(benchmark.Name()))

    suite.addTest(TestNoBenchmarkNamesDuplication())
    suite.addTest(TestNoOverrideCustomizeBrowserOptions())
def load_tests(loader, standard_tests, pattern):
    """unittest load_tests hook: build the benchmark-options test suite."""
    del loader, standard_tests, pattern  # unused
    suite = progress_reporter.TestSuite()
    _AddBenchmarkOptionsTests(suite)
    return suite
| bsd-3-clause |
android-ia/platform_external_chromium_org | tools/telemetry/telemetry/util/find_dependencies.py | 44 | 9255 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import fnmatch
import imp
import logging
import modulefinder
import optparse
import os
import sys
import zipfile
from telemetry import benchmark
from telemetry.core import command_line
from telemetry.core import discover
from telemetry.core import util
from telemetry.util import bootstrap
from telemetry.util import cloud_storage
from telemetry.util import path_set
DEPS_FILE = 'bootstrap_deps'
def _InDirectory(subdirectory, directory):
subdirectory = os.path.realpath(subdirectory)
directory = os.path.realpath(directory)
common_prefix = os.path.commonprefix([subdirectory, directory])
return common_prefix == directory
def FindBootstrapDependencies(base_dir):
    """Return the set of paths listed in base_dir's bootstrap_deps file.

    Paths in the deps file are relative to the parent of the Chromium src
    directory; an empty list is returned when no deps file exists.
    """
    deps_file = os.path.join(base_dir, DEPS_FILE)
    if not os.path.exists(deps_file):
        return []
    src_parent = os.path.join(util.GetChromiumSrcDir(), os.pardir)
    return set(os.path.realpath(os.path.join(src_parent, path))
               for path in bootstrap.ListAllDepsPaths(deps_file))
def FindPythonDependencies(module_path):
    """Yield the Chromium-local Python files imported by *module_path*."""
    logging.info('Finding Python dependencies of %s' % module_path)

    # Load the module so that any sys.path tweaks it performs at import
    # time are in effect during the dependency scan below.
    imp.load_source(
        os.path.splitext(os.path.basename(module_path))[0], module_path)

    # Statically analyze the module for its imports.
    finder = modulefinder.ModuleFinder()
    finder.run_script(module_path)

    for module in finder.modules.itervalues():
        # For a package's __init__.py, __path__ gives the package's folder.
        module_path = module.__path__[0] if module.__path__ else module.__file__
        if not module_path:
            continue

        module_path = os.path.realpath(module_path)
        # Only report modules that live inside the Chromium checkout.
        if not _InDirectory(module_path, util.GetChromiumSrcDir()):
            continue

        yield module_path
def FindPageSetDependencies(base_dir):
    """Yield the serving directories needed by page sets under *base_dir*."""
    logging.info('Finding page sets in %s' % base_dir)

    # Add base_dir to path so our imports relative to base_dir will work.
    sys.path.append(base_dir)
    tests = discover.DiscoverClasses(base_dir, base_dir, benchmark.Benchmark,
                                     index_by_class_name=True)

    for test_class in tests.itervalues():
        test_obj = test_class()

        # Ensure the test's default options are set if needed.
        parser = optparse.OptionParser()
        test_obj.AddCommandLineArgs(parser)
        options = optparse.Values()
        for k, v in parser.get_default_values().__dict__.iteritems():
            options.ensure_value(k, v)

        # Page set paths are relative to their runner script, not relative
        # to us, so point GetBaseDir at the runner's directory.
        util.GetBaseDir = lambda: base_dir

        # TODO: Loading the page set will automatically download its Cloud
        # Storage deps. This is really expensive, and we don't want to do
        # this by default.
        page_set = test_obj.CreatePageSet(options)

        # Add all of its serving_dirs as dependencies.
        for serving_dir in page_set.serving_dirs:
            yield serving_dir
        for page in page_set:
            if page.is_file:
                yield page.serving_dir
def FindExcludedFiles(files, options):
    """Yield the subset of *files* that should be excluded from packaging.

    A path is excluded when any of these hold: it is hidden (some path
    component starts with '.'), it is a compiled .pyc file, it is already
    mirrored in cloud storage (a sibling .sha1 file exists), or it matches
    one of the glob patterns in options.exclude (against the full path or
    the basename).
    """
    def _is_hidden(path):
        return any(component.startswith('.')
                   for component in path.split(os.sep))

    def _is_pyc(path):
        return os.path.splitext(path)[1] == '.pyc'

    def _in_cloud_storage(path):
        return os.path.exists(path + '.sha1')

    def _matches_exclude_options(path):
        basename = os.path.basename(path)
        return any(fnmatch.fnmatch(path, pattern) or
                   fnmatch.fnmatch(basename, pattern)
                   for pattern in options.exclude)

    exclude_conditions = (
        _is_hidden,
        _is_pyc,
        _in_cloud_storage,
        _matches_exclude_options,
    )

    for path in files:
        if any(condition(path) for condition in exclude_conditions):
            yield path
def FindDependencies(paths, options):
    """Collect every file path the given Telemetry scripts depend on.

    Raises:
      ValueError: if any entry in *paths* does not exist.
    """
    # Verify arguments.
    for path in paths:
        if not os.path.exists(path):
            raise ValueError('Path does not exist: %s' % path)

    dependencies = path_set.PathSet()

    # Including __init__.py will include Telemetry and its dependencies.
    # If the user doesn't pass any arguments, we just have Telemetry.
    dependencies |= FindPythonDependencies(os.path.realpath(
        os.path.join(util.GetTelemetryDir(), 'telemetry', '__init__.py')))
    dependencies |= FindBootstrapDependencies(util.GetTelemetryDir())

    # Add the dependencies of each requested script.
    for path in paths:
        base_dir = os.path.dirname(os.path.realpath(path))

        dependencies.add(base_dir)
        dependencies |= FindBootstrapDependencies(base_dir)
        dependencies |= FindPythonDependencies(path)
        if options.include_page_set_data:
            dependencies |= FindPageSetDependencies(base_dir)

    # Remove excluded files.
    dependencies -= FindExcludedFiles(set(dependencies), options)

    return dependencies
def ZipDependencies(paths, dependencies, options):
base_dir = os.path.dirname(os.path.realpath(util.GetChromiumSrcDir()))
with zipfile.ZipFile(options.zip, 'w', zipfile.ZIP_DEFLATED) as zip_file:
# Add dependencies to archive.
for path in dependencies:
path_in_archive = os.path.join(
'telemetry', os.path.relpath(path, base_dir))
zip_file.write(path, path_in_archive)
# Add symlinks to executable paths, for ease of use.
for path in paths:
link_info = zipfile.ZipInfo(
os.path.join('telemetry', os.path.basename(path)))
link_info.create_system = 3 # Unix attributes.
# 010 is regular file, 0111 is the permission bits rwxrwxrwx.
link_info.external_attr = 0100777 << 16 # Octal.
relative_path = os.path.relpath(path, base_dir)
link_script = (
'#!/usr/bin/env python\n\n'
'import os\n'
'import sys\n\n\n'
'script = os.path.join(os.path.dirname(__file__), \'%s\')\n'
'os.execv(sys.executable, [sys.executable, script] + sys.argv[1:])'
% relative_path)
zip_file.writestr(link_info, link_script)
# Add gsutil to the archive, if it's available. The gsutil in
# depot_tools is modified to allow authentication using prodaccess.
# TODO: If there's a gsutil in telemetry/third_party/, bootstrap_deps
# will include it. Then there will be two copies of gsutil at the same
# location in the archive. This can be confusing for users.
gsutil_path = os.path.realpath(cloud_storage.FindGsutil())
if cloud_storage.SupportsProdaccess(gsutil_path):
gsutil_base_dir = os.path.join(os.path.dirname(gsutil_path), os.pardir)
gsutil_dependencies = path_set.PathSet()
gsutil_dependencies.add(os.path.dirname(gsutil_path))
# Also add modules from depot_tools that are needed by gsutil.
gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'boto'))
gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'fancy_urllib'))
gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'retry_decorator'))
gsutil_dependencies -= FindExcludedFiles(
set(gsutil_dependencies), options)
# Also add upload.py to the archive from depot_tools, if it is available.
# This allows us to post patches without requiring a full depot_tools
# install. There's no real point in including upload.py if we do not
# also have gsutil, which is why this is inside the gsutil block.
gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'upload.py'))
for path in gsutil_dependencies:
path_in_archive = os.path.join(
'telemetry', os.path.relpath(util.GetTelemetryDir(), base_dir),
'third_party', os.path.relpath(path, gsutil_base_dir))
zip_file.write(path, path_in_archive)
class FindDependenciesCommand(command_line.OptparseCommand):
"""Prints all dependencies"""
@classmethod
def AddCommandLineArgs(cls, parser):
parser.add_option(
'-v', '--verbose', action='count', dest='verbosity',
help='Increase verbosity level (repeat as needed).')
parser.add_option(
'-p', '--include-page-set-data', action='store_true', default=False,
help='Scan tests for page set data and include them.')
parser.add_option(
'-e', '--exclude', action='append', default=[],
help='Exclude paths matching EXCLUDE. Can be used multiple times.')
parser.add_option(
'-z', '--zip',
help='Store files in a zip archive at ZIP.')
@classmethod
def ProcessCommandLineArgs(cls, parser, args):
if args.verbosity >= 2:
logging.getLogger().setLevel(logging.DEBUG)
elif args.verbosity:
logging.getLogger().setLevel(logging.INFO)
else:
logging.getLogger().setLevel(logging.WARNING)
def Run(self, args):
paths = args.positional_args
dependencies = FindDependencies(paths, args)
if args.zip:
ZipDependencies(paths, dependencies, args)
print 'Zip archive written to %s.' % args.zip
else:
print '\n'.join(sorted(dependencies))
return 0
| bsd-3-clause |
yamt/ryu | ryu/contrib/tinyrpc/client.py | 41 | 3068 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from .exc import RPCError
class RPCClient(object):
    """Client for making RPC calls to connected servers.

    :param protocol: An :py:class:`~tinyrpc.RPCProtocol` instance.
    :param transport: A :py:class:`~tinyrpc.transports.ClientTransport`
                      instance.
    """

    def __init__(self, protocol, transport):
        self.protocol = protocol
        self.transport = transport

    def _send_and_handle_reply(self, req):
        # Serialize and send, blocking until the transport returns a reply.
        raw_reply = self.transport.send_message(req.serialize())
        response = self.protocol.parse_reply(raw_reply)

        # A reply carrying an ``error`` attribute signals a remote failure.
        if hasattr(response, 'error'):
            raise RPCError('Error calling remote procedure: %s' %
                           response.error)

        return response

    def call(self, method, args, kwargs, one_way=False):
        """Calls the requested method and returns the result.

        If an error occured, an :py:class:`~tinyrpc.exc.RPCError` instance
        is raised.

        :param method: Name of the method to call.
        :param args: Arguments to pass to the method.
        :param kwargs: Keyword arguments to pass to the method.
        :param one_way: Whether or not a reply is desired.
        """
        request = self.protocol.create_request(method, args, kwargs, one_way)
        return self._send_and_handle_reply(request).result

    def get_proxy(self, prefix='', one_way=False):
        """Convenience method for creating a proxy.

        :param prefix: Passed on to :py:class:`~tinyrpc.client.RPCProxy`.
        :param one_way: Passed on to :py:class:`~tinyrpc.client.RPCProxy`.
        :return: :py:class:`~tinyrpc.client.RPCProxy` instance."""
        return RPCProxy(self, prefix, one_way)

    def batch_call(self, calls):
        """Experimental, use at your own peril."""
        batch = self.protocol.create_batch_request()
        for call_args in calls:
            batch.append(self.protocol.create_request(*call_args))
        return self._send_and_handle_reply(batch)
class RPCProxy(object):
    """Create a new remote proxy object.

    Proxies allow calling of methods through a simpler interface. See the
    documentation for an example.

    :param client: An :py:class:`~tinyrpc.client.RPCClient` instance.
    :param prefix: Prefix to prepend to every method name.
    :param one_way: Passed to every call of
                    :py:func:`~tinyrpc.client.call`.
    """

    def __init__(self, client, prefix='', one_way=False):
        self.client = client
        self.prefix = prefix
        self.one_way = one_way

    def __getattr__(self, name):
        """Returns a proxy function that, when called, will call a function
        name ``name`` on the client associated with the proxy.
        """
        def _remote_call(*args, **kwargs):
            # Forward positional and keyword arguments to the client,
            # prepending the configured prefix to the method name.
            return self.client.call(self.prefix + name, args, kwargs,
                                    one_way=self.one_way)
        return _remote_call
| apache-2.0 |
ran5515/DeepDecision | tensorflow/contrib/keras/api/keras/losses/__init__.py | 46 | 2097 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in loss functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Loss functions.
from tensorflow.contrib.keras.python.keras.losses import binary_crossentropy
from tensorflow.contrib.keras.python.keras.losses import categorical_crossentropy
from tensorflow.contrib.keras.python.keras.losses import cosine_proximity
from tensorflow.contrib.keras.python.keras.losses import hinge
from tensorflow.contrib.keras.python.keras.losses import kullback_leibler_divergence
from tensorflow.contrib.keras.python.keras.losses import mean_absolute_error
from tensorflow.contrib.keras.python.keras.losses import mean_absolute_percentage_error
from tensorflow.contrib.keras.python.keras.losses import mean_squared_error
from tensorflow.contrib.keras.python.keras.losses import mean_squared_logarithmic_error
from tensorflow.contrib.keras.python.keras.losses import poisson
from tensorflow.contrib.keras.python.keras.losses import sparse_categorical_crossentropy
from tensorflow.contrib.keras.python.keras.losses import squared_hinge
# Auxiliary utils.
# pylint: disable=g-bad-import-order
from tensorflow.contrib.keras.python.keras.losses import deserialize
from tensorflow.contrib.keras.python.keras.losses import serialize
from tensorflow.contrib.keras.python.keras.losses import get
del absolute_import
del division
del print_function
| apache-2.0 |
jwaterfield/echidna | echidna/core/config.py | 3 | 30495 | """ Echidna's config module.
Contains the :class:`Config` class and all classes that inherit from it.
"""
from echidna.core.parameter import (RateParameter, ScaleParameter,
ShiftParameter, ResolutionParameter,
SpectraParameter)
import echidna.util.yaml_loader as yaml_loader
import logging
import abc
from collections import OrderedDict
class Config(object):
    """ The base class for creating config classes.

    Args:
        name (string): The name of the config.
        parameters (:class:`collections.OrderedDict`): Dictionary of
          parameters, keyed by parameter name.

    Attributes:
        _logger (:class:`logging.Logger`): Logger for the config class.
        _name (string): The name of the config.
        _type (string): The type of the config, this affects its
          parameter types. Derived classes override this.
        _parameters (:class:`collections.OrderedDict`): Dictionary of
          parameters, keyed by parameter name.
    """
    def __init__(self, name, parameters):
        """ Initialise config class.
        """
        self._logger = logging.getLogger("Config")
        self._name = name
        self._type = "general"  # derived classes set their own type
        self._parameters = parameters

    def add_par(self, par):
        """ Add parameter to the config.

        Args:
            par (:class:`echidna.core.spectra.Parameter`): The parameter
              you want to add. It is stored under its ``_name`` attribute.
        """
        self._parameters[par._name] = par

    @abc.abstractmethod
    def dump(self):
        """ Abstract base class method to override.

        Dumps the config to a config dictionary, containing all
        parameters. The dictionary has the form specified in the
        :meth:`Config.load` method.

        Returns:
            dict: Dictionary containing all the information on the
              parameters.
        """
        raise NotImplementedError("The dump method can only be used "
                                  "when overriden in a derived class.")

    @abc.abstractmethod
    def dump_to_file(self, path="", filename=None):
        """ Abstract base class method to override.

        Write config to YAML file.

        Args:
            path (string, optional): Location to save yaml file to,
              default is the current directory.
            filename (string, optional): Filename for yaml file. If no
              filename is supplied, the default is "spectra_config.yml".
              If a blank filename "" is given the config's name is used.
        """
        raise NotImplementedError("The dump_to_file method can only be used "
                                  "when overriden in a derived class.")

    def get_index(self, parameter):
        """Return the index of a parameter within the existing set.

        Args:
            parameter (string): Name of the parameter.

        Raises:
            IndexError: parameter is not in the config.

        Returns:
            int: Index of the parameter.
        """
        for i, p in enumerate(self.get_pars()):
            if p == parameter:
                return i
        raise IndexError("Unknown parameter %s" % parameter)

    def get_name(self):
        """
        Returns:
            string: Name of :class:`Config` class instance - stored in
              :attr:`_name`.
        """
        return self._name

    def get_par(self, name):
        """Get a named parameter.

        Args:
            name (string): Name of the parameter.

        Returns:
            :class:`echidna.core.spectra.Parameter`: Named parameter.
        """
        return self._parameters[name]

    def get_par_by_index(self, index):
        """ Get parameter corresponding to given index.

        Args:
            index (int): Index of parameter.

        Returns:
            :class:`echidna.core.spectra.Parameter`: Corresponding
              parameter.
        """
        name = self.get_pars()[index]
        return self.get_par(name)

    def get_pars(self):
        """Get list of all parameter names in the config.

        Returns:
            list: List of parameter names.
        """
        # Wrapped in list() so the result is indexable - in Python 3
        # dict.keys() returns a view object, which get_par_by_index
        # could not subscript.
        return list(self._parameters.keys())

    def get_shape(self):
        """ Get the shape of the parameter space.

        Returns:
            tuple: A tuple constructed of the number of bins for each
              parameter in the config - this can be thought of as the
              full shape of the parameter space, whether it is the shape
              of the parameter space for the fit, or the shape of the
              spectral dimensions.
        """
        return tuple([self.get_par(par).get_bins() for par in self.get_pars()])

    def get_type(self):
        """
        Returns:
            string: Type of :class:`Config` class instance - stored in
              :attr:`_type`.
        """
        # Bug fix: previously returned self._name, which made it
        # impossible to distinguish a config's type from its name.
        return self._type

    @classmethod
    @abc.abstractmethod
    def load(cls, config, name="config"):
        """ Abstract base class method to override.

        Initialise Config class from a config dictionary (classmethod).

        Args:
            config (dict): Dictionary to create config out of.
            name (string, optional): Name to assign to the
              :class:`Config`. If no name is supplied the default
              'spectra_config' will be used.

        Returns:
            (:class:`Config`): A config object containing the parameters
              from the config dictionary.

        Raises:
            KeyError: If the :obj:`config` dictionary has the wrong format.

        .. warning:: :obj:`config` dict must have valid format.

          Valid format is::

            {"parameters": {
                "<parameter>": {
                    "low": <low>,
                    "high": <high>,
                    "bins": <bins>}}}
        """
        raise NotImplementedError("The load method can only be used "
                                  "when overriden in a derived class.")

    @classmethod
    @abc.abstractmethod
    def load_from_file(cls, filename, name=None):
        """ Abstract base class method to override.

        Initialise Config class from a config file (classmethod).

        Args:
            filename (str): path to config file
            name (string, optional): Assign a name to the :class:`Config`
              created. If no name is supplied, the default is 'config'.
              If a blank string is supplied, the name of the file will
              be used.

        Returns:
            (:class:`Config`): A config object containing the parameters
              in the file.
        """
        raise NotImplementedError("The load_from_file method can only be used "
                                  "when overriden in a derived class.")
class GlobalFitConfig(Config):
    """Configuration container for floating systematics and fitting Spectra
    objects. Able to load directly with a set list of FitParameters or
    from yaml configuration files.

    Args:
        config_name (string): Name of config
        parameters (:class:`OrderedDict`): Dictionary of FitParameter
          entries, keyed by name. Each value is a dict with keys 'par'
          (the parameter object) and 'type' ('global' or 'spectra').
    """
    def __init__(self, config_name, parameters):
        """Initialise GlobalFitConfig class
        """
        super(GlobalFitConfig, self).__init__(config_name, parameters)
        self._type = "global_fit"

    def add_config(self, config):
        """ Add pars from a :class:`echidna.core.spectra.Config` to this
        :class:`echidna.core.spectra.GlobalFitConfig`

        Args:
            config (:class:`echidna.core.spectra.Config`): Config to be
              added.

        Raises:
            ValueError: If config is not of type 'spectra_fit' or
              'global_fit'.
        """
        if config._type == "spectra_fit":
            # Prefix each parameter name with the spectra name so that
            # parameters from different spectra cannot collide.
            spectra_name = config._spectra_name
            for par_name in config.get_pars():
                name = spectra_name + "_" + par_name
                par = config.get_par(par_name)
                par._name = name
                self.add_par(par, "spectra")
        elif config._type == "global_fit":
            for par in config.get_global_pars():
                self.add_par(par, "global")
            for par in config.get_spectra_pars():
                self.add_par(par, "spectra")
        else:
            raise ValueError("Cannot add %s-type config to a config "
                             "of type %s" % (config._type, self._type))

    def add_par(self, par, par_type):
        """ Add parameter to the global fit config.

        Args:
            par (:class:`echidna.core.spectra.FitParameter`): Parameter
              you want to add.
            par_type (string): The type of parameter ('global' or
              'spectra').

        Raises:
            IndexError: If par_type is not 'global' or 'spectra'.
        """
        if par_type != 'global' and par_type != 'spectra':
            raise IndexError("%s is an invalid par_type. Must be 'global' or "
                             "'spectra'." % par_type)
        self._parameters[par._name] = {'par': par, 'type': par_type}

    def dump(self, basic=False):
        """ Dumps the config to a global fit config dictionary,
        containing all the 'global' parameters, and a spectral fit
        config dictionary (if required), containing any 'spectral'
        parameters that have been added. The dictionaries have,
        respectively, the forms specified in the
        :meth:`GlobalFitConfig.load` and
        :meth:`echidna.core.spectra.SpectralFitConfig.load` methods.

        Args:
            basic (bool, optional): If True, only the basic properties
              of each parameter are included.

        Returns:
            dict: Dictionary containing all the information on the
              'global' parameters.
            dict: Dictionary containing all the information on the
              'spectral' parameters.
        """
        # Global fit parameters - grouped by spectral dimension
        main_key = "global_fit_parameters"
        global_fit_config = OrderedDict()
        global_fit_config[main_key] = OrderedDict()
        for par in self.get_global_pars():
            dimension = par.get_dimension()
            # Make entry for dimension - as required
            if dimension not in global_fit_config[main_key]:
                global_fit_config[main_key][dimension] = OrderedDict()
            name = par.get_name()
            # Remove dimension from name, if required
            if dimension in name:
                name = name.replace(dimension+"_", "")
            # Get parameter dict from par
            global_fit_config[main_key][dimension][name] = par.to_dict(basic)
        # Spectral fit parameters - flat, no dimension grouping
        main_key = "spectral_fit_parameters"
        spectral_fit_config = OrderedDict()
        spectral_fit_config[main_key] = OrderedDict()
        for par in self.get_spectra_pars():
            name = par.get_name()
            # Get parameter dict from par
            spectral_fit_config[main_key][name] = par.to_dict(basic)
        return global_fit_config, spectral_fit_config

    def dump_to_file(self, path="", global_fname=None,
                     spectral_fname=None, basic=False):
        """ Write config(s) to YAML file. Separate files are created
        for global and spectral parameters.

        Args:
            path (string, optional): Location to save yaml file(s) to,
              default is the current directory.
            global_fname (string, optional): Filename for global
              parameters yaml file. If no filename is supplied, the
              default is "global_fit_config.yml". If a blank filename ""
              is given the config's name is used.
            spectral_fname (string, optional): Filename for spectral
              parameters yaml file. If no filename is supplied, the
              default is "spectral_fit_config.yml". If a blank filename ""
              is given the config's name is used.
            basic (bool, optional): If True, only the basic properties:
              prior, sigma, low, high and bins are included.
        """
        global_fit_config, spectral_fit_config = self.dump(basic)
        if global_fname is None:
            global_fname = "global_fit_config"
        elif global_fname == "":
            global_fname = self.get_name()
        if ".yml" not in global_fname:
            global_fname += ".yml"
        with open(path+global_fname, "w") as stream:
            yaml_loader.ordered_dump(
                global_fit_config, stream=stream, indent=8)
        if spectral_fname is None:
            spectral_fname = "spectral_fit_config"
        elif spectral_fname == "":
            spectral_fname = self.get_name()
        if ".yml" not in spectral_fname:
            spectral_fname += ".yml"
        with open(path+spectral_fname, "w") as stream:
            yaml_loader.ordered_dump(
                spectral_fit_config, stream=stream, indent=8)

    def get_par(self, name):
        """ Get requested parameter.

        Args:
            name (string): Name of the parameter

        Returns:
            :class:`echidna.core.spectra.FitParameter`: The requested
              parameter.
        """
        return self._parameters[name]['par']

    def get_global_pars(self):
        """ Gets the parameters which are applied to all spectra
        simultaneously.

        Returns:
            list: Of :class:`echidna.core.spectra.FitParameter` objects.
        """
        return [self._parameters[name]['par'] for name in self._parameters
                if self._parameters[name]['type'] == 'global']

    def get_spectra_pars(self):
        """ Gets the parameters that are applied to individual spectra.

        Returns:
            list: Of :class:`echidna.core.spectra.FitParameter` objects.
        """
        return [self._parameters[name]['par'] for name in self._parameters
                if self._parameters[name]['type'] == 'spectra']

    @classmethod
    def load(cls, global_config, spectral_config=None,
             name="global_fit_config"):
        """Initialise GlobalFitConfig class from a config dictionary
        (classmethod).

        Args:
            global_config (dict): Dictionary to create config out of.
            spectral_config (dict): Dictionary of spectral fit parameters
              to create config out of.
            name (string, optional): Name to assign to the
              :class:`GlobalFitConfig`. If no name is supplied the
              default 'global_fit_config' will be used.

        Returns:
            (:class:`echidna.core.spectra.GlobalFitConfig`): A config object
              containing the parameters in the file called filename.

        Raises:
            KeyError: If the :obj:`global_config` dictionary does not
              contain the key 'global_fit_parameters' as this suggests
              the dictionary has the wrong format.
            IndexError: If an invalid global fit parameter name is
              encountered.
            KeyError: If the :obj:`spectral_config` dictionary does not
              contain the key 'spectral_fit_parameters' as this
              suggests the dictionary has the wrong format.
            IndexError: If an invalid spectral fit parameter name is
              encountered.

        .. warning:: :obj:`config` dict must have valid format.

          Valid format is::

            {"global_fit_parameters": {
                "<spectral_dimension>": {
                    "<parameter_name>": {
                        "prior": <prior>,
                        "sigma": <sigma>,
                        "low": <low>,
                        "high": <high>,
                        "bins": <bins>}}}}

          For spectral config see :meth:`SpectralFitConfig.load`.
        """
        main_key = "global_fit_parameters"
        parameters = OrderedDict()
        if main_key not in global_config:
            logging.getLogger("extra").debug("\n\n%s\n" % str(global_config))
            raise KeyError("Cannot read global fit config dictionary. "
                           "Please check it has the correct form")
        for dim in global_config[main_key]:
            for syst in global_config[main_key][dim]:
                # Bug fix: use a separate variable for the parameter name
                # so the config name passed in via ``name`` is not
                # clobbered by the loop.
                par_name = dim + "_" + syst
                if syst == 'resolution' or syst == 'resolution_ly':
                    parameters[par_name] = {
                        'par': ResolutionParameter(
                            par_name, dimension=dim,
                            **global_config[main_key][dim][syst]),
                        'type': 'global'}
                elif syst == 'shift':
                    parameters[par_name] = {
                        'par': ShiftParameter(
                            par_name, dimension=dim,
                            **global_config[main_key][dim][syst]),
                        'type': 'global'}
                elif syst == 'scale':
                    parameters[par_name] = {
                        'par': ScaleParameter(
                            par_name, dimension=dim,
                            **global_config[main_key][dim][syst]),
                        'type': 'global'}
                else:
                    raise IndexError("%s is not a valid global fit parameter."
                                     % syst)
        if spectral_config is None:
            return cls(name, parameters)
        # Add spectral fit parameters:
        main_key = "spectral_fit_parameters"
        # Membership test (not truthiness) so an empty - but correctly
        # formatted - parameter mapping is accepted.
        if main_key not in spectral_config:
            logging.getLogger("extra").debug("\n\n%s\n" % str(spectral_config))
            raise KeyError("Cannot read spectra fit config dictionary. "
                           "Please check it has the correct form")
        for syst in spectral_config[main_key]:
            if "rate" in syst:
                parameters[syst] = {
                    'par': RateParameter(
                        syst, **spectral_config[main_key][syst]),
                    'type': 'spectra'}
            else:
                raise IndexError("Unknown systematic in config: %s" % syst)
        return cls(name, parameters)

    @classmethod
    def load_from_file(cls, filename, sf_filename=None, name=None):
        """Initialise GlobalFitConfig class from a config file (classmethod).

        Args:
            filename (string): path to config file
            sf_filename (string, optional): path to a separate spectral
              fit config file, to include.
            name (string, optional): Assign a name to the
              :class:`GlobalFitConfig` created. If no name is supplied,
              the default is 'global_fit_config'. If a blank string is
              supplied, the name of the file will be used.

        Returns:
            (:class:`echidna.core.spectra.GlobalFitConfig`): A config object
              containing the parameters in the file called filename.
        """
        config = yaml_loader.ordered_load(open(filename, 'r'))
        if sf_filename:
            spectral_fit_config = yaml_loader.ordered_load(
                open(sf_filename, "r"))
        else:
            spectral_fit_config = None
        # Bug fix: test explicitly for None. The previous ``if not name``
        # also caught "" and made the documented blank-string branch
        # (use the filename as the config name) unreachable.
        if name is None:
            return cls.load(config, spectral_config=spectral_fit_config)
        if name == "":
            # Use the file's basename (without extension) as the name
            name = filename[filename.rfind("/")+1:filename.rfind(".")]
        return cls.load(config, spectral_config=spectral_fit_config, name=name)
class SpectraFitConfig(Config):
    """Configuration container for floating systematics and fitting Spectra
    objects. Able to load directly with a set list of FitParameters or
    from yaml configuration files.

    Args:
        config_name (string): Name of config
        parameters (:class:`OrderedDict`): Dictionary of
          FitParameter objects, keyed by name.
        spectra_name (string): Name of the spectra associated with the
          :class:`echidna.core.spectra.SpectraFitConfig`

    Attributes:
        _spectra_name (string): Name of the spectra associated with the
          :class:`echidna.core.spectra.SpectraFitConfig`
    """
    def __init__(self, config_name, parameters, spectra_name):
        """Initialise SpectraFitConfig class
        """
        super(SpectraFitConfig, self).__init__(config_name, parameters)
        self._type = "spectra_fit"
        self._spectra_name = spectra_name

    def dump(self, basic=False):
        """ Dumps the config to a spectral fit config dictionary,
        containing all 'spectral' fit parameters. The dictionary has
        the form specified in the :meth:`SpectralFitConfig.load`
        method.

        Args:
            basic (bool, optional): If True, only the basic properties
              of each parameter are included.

        Returns:
            dict: Dictionary containing all the information on the
              'spectral' parameters.
        """
        main_key = "spectral_fit_parameters"
        spectral_fit_config = OrderedDict()
        spectral_fit_config[main_key] = OrderedDict()
        for parameter in self.get_pars():
            par = self.get_par(parameter)
            # Get parameter dict from par
            spectral_fit_config[main_key][parameter] = par.to_dict(basic)
        return spectral_fit_config

    def dump_to_file(self, path="", spectral_fname=None, basic=False):
        """ Write config to YAML file.

        Args:
            path (string, optional): Location to save yaml file(s) to,
              default is the current directory.
            spectral_fname (string, optional): Filename for spectral
              parameters yaml file. If no filename is supplied, the
              default is "spectral_fit_config.yml". If a blank filename ""
              is given the config's name is used.
            basic (bool, optional): If True, only the basic properties:
              prior, sigma, low, high and bins are included.
        """
        spectral_fit_config = self.dump(basic)
        if spectral_fname is None:
            spectral_fname = "spectral_fit_config"
        elif spectral_fname == "":
            spectral_fname = self.get_name()
        if ".yml" not in spectral_fname:
            spectral_fname += ".yml"
        with open(path+spectral_fname, "w") as stream:
            yaml_loader.ordered_dump(
                spectral_fit_config, stream=stream, indent=8)

    @classmethod
    def load(cls, config, spectra_name, name="spectral_fit_config"):
        """Initialise SpectraFitConfig class from a config dictionary
        (classmethod).

        Args:
            config (dict): Dictionary to create config out of.
            spectra_name (string): Name of the spectra associated with
              this config.
            name (string, optional): Name to assign to the
              :class:`SpectraFitConfig`. If no name is supplied the
              default 'spectral_fit_config' will be used.

        Returns:
            (:class:`SpectraFitConfig`): A config object containing the
              parameters from the config dictionary.

        Raises:
            KeyError: If the :obj:`config` dictionary does not contain
              the key 'spectral_fit_parameters' as this suggests the
              dictionary has the wrong format.
            IndexError: If an invalid spectral fit parameter name is
              encountered.

        .. warning:: :obj:`config` dict must have valid format.

          Valid format is::

            {"spectral_fit_parameters": {
                "<parameter_name>": {
                    "prior": <prior>,
                    "sigma": <sigma>,
                    "low": <low>,
                    "high": <high>,
                    "bins": <bins>}}}
        """
        main_key = "spectral_fit_parameters"
        # Membership test (not truthiness) so an empty - but correctly
        # formatted - parameter mapping is accepted; consistent with
        # GlobalFitConfig.load.
        if main_key not in config:
            logging.getLogger("extra").debug("\n\n%s\n" % str(config))
            raise KeyError("Cannot read spectra fit config dictionary. "
                           "Please check it has the correct form")
        parameters = OrderedDict()
        for syst in config[main_key]:
            if "rate" in syst:
                parameters[syst] = RateParameter(syst,
                                                 **config[main_key][syst])
            else:
                raise IndexError("Unknown systematic in config: %s" % syst)
        return cls(name, parameters, spectra_name)

    @classmethod
    def load_from_file(cls, filename, spectra_name, name=None):
        """Initialise SpectraFitConfig class from a config file (classmethod).

        Args:
            filename (str): path to config file
            spectra_name (string): Name of the spectra associated with the
              :class:`echidna.core.spectra.SpectraFitConfig`
            name (string, optional): Assign a name to the
              :class:`SpectraFitConfig` created. If no name is supplied,
              the default is 'spectral_fit_config'. If a blank string is
              supplied, the name of the file will be used.

        Returns:
            (:class:`SpectraFitConfig`): A config object containing the
              parameters in the file.
        """
        config = yaml_loader.ordered_load(open(filename, 'r'))
        # Bug fix: test explicitly for None. The previous ``if not name``
        # also caught "" and made the documented blank-string branch
        # (use the filename as the config name) unreachable.
        if name is None:
            return cls.load(config, spectra_name)
        if name == "":
            name = filename[filename.rfind("/")+1:filename.rfind(".")]
        return cls.load(config, spectra_name, name=name)
class SpectraConfig(Config):
    """Configuration container for Spectra objects. Able to load
    directly with a set list of SpectraParameters or from yaml
    configuration files.

    Args:
        config_name (string): Name of config
        parameters (:class:`OrderedDict`): Dictionary of
          SpectraParameter objects, keyed by name.
    """
    def __init__(self, config_name, parameters):
        """Initialise SpectraConfig class
        """
        super(SpectraConfig, self).__init__(config_name, parameters)
        self._type = "spectra"

    def dump(self):
        """ Dumps the spectra config to a config dictionary, containing
        all spectra parameters. The dictionary has the form specified
        in the :meth:`SpectraConfig.load` method.

        Returns:
            dict: Dictionary containing all the information on the
              spectra parameters.
        """
        main_key = "parameters"
        config = OrderedDict()
        config[main_key] = OrderedDict()
        for parameter in self.get_pars():
            par = self.get_par(parameter)
            # Get parameter dict from par
            config[main_key][parameter] = par.to_dict()
        return config

    def dump_to_file(self, path="", filename=None):
        """ Write spectra config to YAML file.

        Args:
            path (string, optional): Location to save yaml file to,
              default is the current directory.
            filename (string, optional): Filename for yaml file. If no
              filename is supplied, the default is "spectra_config.yml".
              If a blank filename "" is given the config's name is used.
        """
        config = self.dump()
        if filename is None:
            filename = "spectra_config"
        elif filename == "":
            filename = self.get_name()
        if ".yml" not in filename:
            filename += ".yml"
        with open(path+filename, "w") as stream:
            yaml_loader.ordered_dump(config, stream=stream, indent=8)

    @classmethod
    def load(cls, config, name="config"):
        """Initialise SpectraConfig class from a config dictionary
        (classmethod).

        Args:
            config (dict): Dictionary to create spectra config out of.
            name (string, optional): Name to assign to the
              :class:`SpectraConfig`. If no name is supplied the default
              'spectra_config' will be used.

        Returns:
            (:class:`SpectraConfig`): A config object containing the
              spectra parameters from the config dictionary.

        Raises:
            KeyError: If the :obj:`config` dictionary does not contain
              the key 'parameters' as this suggests the dictionary has
              the wrong format.

        .. warning:: :obj:`config` must have valid format.

          Valid format is::

            {"parameters": {
                "<spectral_parameter>": {
                    "low": <low>,
                    "high": <high>.
                    "bins": <bins>}}}
        """
        main_key = "parameters"
        # Membership test (not truthiness) so an empty - but correctly
        # formatted - parameter mapping is accepted.
        if main_key not in config:
            logging.getLogger("extra").debug("\n\n%s\n" % str(config))
            raise KeyError("Cannot read config dictionary. "
                           "Please check it has the correct form")
        parameters = OrderedDict()
        for parameter in config[main_key]:
            parameters[parameter] = SpectraParameter(
                parameter, **config[main_key][parameter])
        return cls(name, parameters)

    @classmethod
    def load_from_file(cls, filename, name=None):
        """Initialise SpectraConfig class from a config file
        (classmethod).

        Args:
            filename (str): path to config file
            name (string, optional): Assign a name to the
              :class:`SpectraConfig` created. If no name is supplied, the
              default is 'spectra_config'. If a blank string is supplied,
              the name of the file will be used.

        Returns:
            (:class:`SpectraConfig`): A config object containing the
              parameters in the file.
        """
        with open(filename, 'r') as stream:
            config = yaml_loader.ordered_load(stream)
        # Bug fix: test explicitly for None. The previous ``if not name``
        # also caught "" and made the documented blank-string branch
        # (use the filename as the config name) unreachable.
        if name is None:
            return cls.load(config)
        if name == "":
            name = filename[filename.rfind("/")+1:filename.rfind(".")]
        return cls.load(config, name)

    def get_dims(self):
        """Get list of dimension names.

        The _mc, _reco and _truth suffixes are removed.

        Returns:
            list: List of the dimension names of the config.
        """
        # NOTE(review): duplicates are possible if e.g. both energy_mc
        # and energy_reco are present - behaviour preserved from the
        # original implementation.
        return [self.get_dim(par) for par in sorted(self._parameters.keys())]

    def get_dim(self, par):
        """Get the dimension of par.

        The _mc, _reco and _truth suffixes are removed.

        Args:
            par (string): Name of the parameter

        Returns:
            The dimension of par
        """
        # Everything before the final underscore-separated component
        # (the mc/reco/truth suffix) is the dimension name.
        return "_".join(par.split('_')[:-1])

    def get_dim_type(self, dim):
        """Returns the type of the dimension i.e. mc, reco or truth.

        Args:
            dim (string): The name of the dimension

        Raises:
            IndexError: dim is not in the spectra.

        Returns:
            string: The type of the dimension (mc, reco or truth)
        """
        for par in sorted(self._parameters.keys()):
            if self.get_dim(par) == dim:
                return str(par.split('_')[-1])
        raise IndexError("No %s dimension in spectra" % dim)
| mit |
takeshineshiro/django | django/contrib/gis/geos/geometry.py | 62 | 23807 | """
This module contains the 'base' GEOSGeometry object -- all GEOS Geometries
inherit from this object.
"""
from __future__ import unicode_literals
import json
from ctypes import addressof, byref, c_double
from django.contrib.gis import gdal
from django.contrib.gis.geometry.regex import hex_regex, json_regex, wkt_regex
from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.coordseq import GEOSCoordSeq
from django.contrib.gis.geos.error import GEOSException, GEOSIndexError
from django.contrib.gis.geos.libgeos import GEOM_PTR
from django.contrib.gis.geos.mutable_list import ListMixin
from django.contrib.gis.geos.prepared import PreparedGeometry
from django.contrib.gis.geos.prototypes.io import (
ewkb_w, wkb_r, wkb_w, wkt_r, wkt_w,
)
from django.utils import six
from django.utils.encoding import force_bytes, force_text
class GEOSGeometry(GEOSBase, ListMixin):
"A class that, generally, encapsulates a GEOS geometry."
# Raise GEOSIndexError instead of plain IndexError
# (see ticket #4740 and GEOSIndexError docstring)
_IndexError = GEOSIndexError
_GEOS_CLASSES = None
ptr_type = GEOM_PTR
has_cs = False # Only Point, LineString, LinearRing have coordinate sequences
def __init__(self, geo_input, srid=None):
"""
The base constructor for GEOS geometry objects, and may take the
following inputs:
* strings:
- WKT
- HEXEWKB (a PostGIS-specific canonical form)
- GeoJSON (requires GDAL)
* buffer:
- WKB
The `srid` keyword is used to specify the Source Reference Identifier
(SRID) number for this Geometry. If not set, the SRID will be None.
"""
if isinstance(geo_input, bytes):
geo_input = force_text(geo_input)
if isinstance(geo_input, six.string_types):
wkt_m = wkt_regex.match(geo_input)
if wkt_m:
# Handling WKT input.
if wkt_m.group('srid'):
srid = int(wkt_m.group('srid'))
g = wkt_r().read(force_bytes(wkt_m.group('wkt')))
elif hex_regex.match(geo_input):
# Handling HEXEWKB input.
g = wkb_r().read(force_bytes(geo_input))
elif json_regex.match(geo_input):
# Handling GeoJSON input.
if not gdal.HAS_GDAL:
raise ValueError('Initializing geometry from JSON input requires GDAL.')
g = wkb_r().read(gdal.OGRGeometry(geo_input).wkb)
else:
raise ValueError('String or unicode input unrecognized as WKT EWKT, and HEXEWKB.')
elif isinstance(geo_input, GEOM_PTR):
# When the input is a pointer to a geometry (GEOM_PTR).
g = geo_input
elif isinstance(geo_input, six.memoryview):
# When the input is a buffer (WKB).
g = wkb_r().read(geo_input)
elif isinstance(geo_input, GEOSGeometry):
g = capi.geom_clone(geo_input.ptr)
else:
# Invalid geometry type.
raise TypeError('Improper geometry input type: %s' % str(type(geo_input)))
if g:
# Setting the pointer object with a valid pointer.
self.ptr = g
else:
raise GEOSException('Could not initialize GEOS Geometry with given input.')
# Post-initialization setup.
self._post_init(srid)
def _post_init(self, srid):
"Helper routine for performing post-initialization setup."
# Setting the SRID, if given.
if srid and isinstance(srid, int):
self.srid = srid
# Setting the class type (e.g., Point, Polygon, etc.)
if GEOSGeometry._GEOS_CLASSES is None:
# Lazy-loaded variable to avoid import conflicts with GEOSGeometry.
from .linestring import LineString, LinearRing
from .point import Point
from .polygon import Polygon
from .collections import (
GeometryCollection, MultiPoint, MultiLineString, MultiPolygon)
GEOSGeometry._GEOS_CLASSES = {
0: Point,
1: LineString,
2: LinearRing,
3: Polygon,
4: MultiPoint,
5: MultiLineString,
6: MultiPolygon,
7: GeometryCollection,
}
self.__class__ = GEOSGeometry._GEOS_CLASSES[self.geom_typeid]
# Setting the coordinate sequence for the geometry (will be None on
# geometries that do not have coordinate sequences)
self._set_cs()
def __del__(self):
"""
Destroys this Geometry; in other words, frees the memory used by the
GEOS C++ object.
"""
if self._ptr and capi:
capi.destroy_geom(self._ptr)
def __copy__(self):
"""
Returns a clone because the copy of a GEOSGeometry may contain an
invalid pointer location if the original is garbage collected.
"""
return self.clone()
def __deepcopy__(self, memodict):
"""
The `deepcopy` routine is used by the `Node` class of django.utils.tree;
thus, the protocol routine needs to be implemented to return correct
copies (clones) of these GEOS objects, which use C pointers.
"""
return self.clone()
def __str__(self):
"EWKT is used for the string representation."
return self.ewkt
def __repr__(self):
"Short-hand representation because WKT may be very large."
return '<%s object at %s>' % (self.geom_type, hex(addressof(self.ptr)))
# Pickling support
def __getstate__(self):
# The pickled state is simply a tuple of the WKB (in string form)
# and the SRID.
return bytes(self.wkb), self.srid
def __setstate__(self, state):
# Instantiating from the tuple state that was pickled.
wkb, srid = state
ptr = wkb_r().read(six.memoryview(wkb))
if not ptr:
raise GEOSException('Invalid Geometry loaded from pickled state.')
self.ptr = ptr
self._post_init(srid)
# Comparison operators
def __eq__(self, other):
"""
Equivalence testing, a Geometry may be compared with another Geometry
or a WKT representation.
"""
if isinstance(other, six.string_types):
return self.wkt == other
elif isinstance(other, GEOSGeometry):
return self.equals_exact(other)
else:
return False
def __ne__(self, other):
"The not equals operator."
return not (self == other)
# ### Geometry set-like operations ###
# Thanks to Sean Gillies for inspiration:
# http://lists.gispython.org/pipermail/community/2007-July/001034.html
# g = g1 | g2
def __or__(self, other):
"Returns the union of this Geometry and the other."
return self.union(other)
# g = g1 & g2
def __and__(self, other):
"Returns the intersection of this Geometry and the other."
return self.intersection(other)
# g = g1 - g2
def __sub__(self, other):
"Return the difference this Geometry and the other."
return self.difference(other)
# g = g1 ^ g2
def __xor__(self, other):
"Return the symmetric difference of this Geometry and the other."
return self.sym_difference(other)
# #### Coordinate Sequence Routines ####
def _set_cs(self):
"Sets the coordinate sequence for this Geometry."
if self.has_cs:
self._cs = GEOSCoordSeq(capi.get_cs(self.ptr), self.hasz)
else:
self._cs = None
@property
def coord_seq(self):
"Returns a clone of the coordinate sequence for this Geometry."
if self.has_cs:
return self._cs.clone()
# #### Geometry Info ####
@property
def geom_type(self):
"Returns a string representing the Geometry type, e.g. 'Polygon'"
return capi.geos_type(self.ptr).decode()
@property
def geom_typeid(self):
"Returns an integer representing the Geometry type."
return capi.geos_typeid(self.ptr)
@property
def num_geom(self):
"Returns the number of geometries in the Geometry."
return capi.get_num_geoms(self.ptr)
@property
def num_coords(self):
"Returns the number of coordinates in the Geometry."
return capi.get_num_coords(self.ptr)
@property
def num_points(self):
"Returns the number points, or coordinates, in the Geometry."
return self.num_coords
@property
def dims(self):
"Returns the dimension of this Geometry (0=point, 1=line, 2=surface)."
return capi.get_dims(self.ptr)
def normalize(self):
"Converts this Geometry to normal form (or canonical form)."
return capi.geos_normalize(self.ptr)
# #### Unary predicates ####
@property
def empty(self):
"""
Returns a boolean indicating whether the set of points in this Geometry
are empty.
"""
return capi.geos_isempty(self.ptr)
@property
def hasz(self):
"Returns whether the geometry has a 3D dimension."
return capi.geos_hasz(self.ptr)
@property
def ring(self):
"Returns whether or not the geometry is a ring."
return capi.geos_isring(self.ptr)
@property
def simple(self):
"Returns false if the Geometry not simple."
return capi.geos_issimple(self.ptr)
@property
def valid(self):
"This property tests the validity of this Geometry."
return capi.geos_isvalid(self.ptr)
@property
def valid_reason(self):
"""
Returns a string containing the reason for any invalidity.
"""
return capi.geos_isvalidreason(self.ptr).decode()
    # #### Binary predicates. ####
    # Each predicate delegates to the corresponding GEOS C-API call on the
    # raw geometry pointers of `self` and `other`.
    def contains(self, other):
        "Returns true if other.within(this) returns true."
        return capi.geos_contains(self.ptr, other.ptr)

    def crosses(self, other):
        """
        Returns true if the DE-9IM intersection matrix for the two Geometries
        is T*T****** (for a point and a curve, a point and an area or a line
        and an area) 0******** (for two curves).
        """
        return capi.geos_crosses(self.ptr, other.ptr)

    def disjoint(self, other):
        """
        Returns true if the DE-9IM intersection matrix for the two Geometries
        is FF*FF****.
        """
        return capi.geos_disjoint(self.ptr, other.ptr)

    def equals(self, other):
        """
        Returns true if the DE-9IM intersection matrix for the two Geometries
        is T*F**FFF*.
        """
        return capi.geos_equals(self.ptr, other.ptr)

    def equals_exact(self, other, tolerance=0):
        """
        Returns true if the two Geometries are exactly equal, up to a
        specified tolerance.
        """
        return capi.geos_equalsexact(self.ptr, other.ptr, float(tolerance))

    def intersects(self, other):
        "Returns true if disjoint returns false."
        return capi.geos_intersects(self.ptr, other.ptr)

    def overlaps(self, other):
        """
        Returns true if the DE-9IM intersection matrix for the two Geometries
        is T*T***T** (for two points or two surfaces) 1*T***T** (for two curves).
        """
        return capi.geos_overlaps(self.ptr, other.ptr)

    def relate_pattern(self, other, pattern):
        """
        Returns true if the elements in the DE-9IM intersection matrix for the
        two Geometries match the elements in pattern.
        """
        # A DE-9IM pattern is at most 9 cells; reject anything else up front.
        if not isinstance(pattern, six.string_types) or len(pattern) > 9:
            raise GEOSException('invalid intersection matrix pattern')
        return capi.geos_relatepattern(self.ptr, other.ptr, force_bytes(pattern))

    def touches(self, other):
        """
        Returns true if the DE-9IM intersection matrix for the two Geometries
        is FT*******, F**T***** or F***T****.
        """
        return capi.geos_touches(self.ptr, other.ptr)

    def within(self, other):
        """
        Returns true if the DE-9IM intersection matrix for the two Geometries
        is T*F**F***.
        """
        return capi.geos_within(self.ptr, other.ptr)
    # #### SRID Routines ####
    def get_srid(self):
        "Gets the SRID for the geometry, returns None if no SRID is set."
        # GEOS uses 0 as the "no SRID" sentinel; surface that as None.
        s = capi.geos_get_srid(self.ptr)
        if s == 0:
            return None
        else:
            return s

    def set_srid(self, srid):
        "Sets the SRID for the geometry."
        capi.geos_set_srid(self.ptr, srid)

    # Expose get/set as a read-write `srid` attribute.
    srid = property(get_srid, set_srid)
    # #### Output Routines ####
    @property
    def ewkt(self):
        """
        Returns the EWKT (SRID + WKT) of the Geometry. Note that Z values
        are only included in this representation if GEOS >= 3.3.0.
        """
        # Only prefix the SRID clause when an SRID is actually set.
        if self.get_srid():
            return 'SRID=%s;%s' % (self.srid, self.wkt)
        else:
            return self.wkt

    @property
    def wkt(self):
        "Returns the WKT (Well-Known Text) representation of this Geometry."
        # 3 output dimensions when the geometry carries Z values, else 2.
        return wkt_w(3 if self.hasz else 2).write(self).decode()

    @property
    def hex(self):
        """
        Returns the WKB of this Geometry in hexadecimal form. Please note
        that the SRID is not included in this representation because it is not
        a part of the OGC specification (use the `hexewkb` property instead).
        """
        # A possible faster, all-python, implementation:
        # str(self.wkb).encode('hex')
        return wkb_w(3 if self.hasz else 2).write_hex(self)

    @property
    def hexewkb(self):
        """
        Returns the EWKB of this Geometry in hexadecimal form. This is an
        extension of the WKB specification that includes SRID value that are
        a part of this geometry.
        """
        return ewkb_w(3 if self.hasz else 2).write_hex(self)

    @property
    def json(self):
        """
        Returns GeoJSON representation of this Geometry.
        """
        # NOTE(review): relies on `self.coords`; confirm all geometry types
        # used with this property expose a `coords` attribute.
        return json.dumps({'type': self.__class__.__name__, 'coordinates': self.coords})
    geojson = json  # Alias: `geojson` and `json` are the same property.

    @property
    def wkb(self):
        """
        Returns the WKB (Well-Known Binary) representation of this Geometry
        as a Python buffer. SRID and Z values are not included, use the
        `ewkb` property instead.
        """
        return wkb_w(3 if self.hasz else 2).write(self)

    @property
    def ewkb(self):
        """
        Return the EWKB representation of this Geometry as a Python buffer.
        This is an extension of the WKB specification that includes any SRID
        value that are a part of this geometry.
        """
        return ewkb_w(3 if self.hasz else 2).write(self)

    @property
    def kml(self):
        "Returns the KML representation of this Geometry."
        gtype = self.geom_type
        return '<%s>%s</%s>' % (gtype, self.coord_seq.kml, gtype)

    @property
    def prepared(self):
        """
        Returns a PreparedGeometry corresponding to this geometry -- it is
        optimized for the contains, intersects, and covers operations.
        """
        return PreparedGeometry(self)
    # #### GDAL-specific output routines ####
    @property
    def ogr(self):
        "Returns the OGR Geometry for this Geometry."
        if not gdal.HAS_GDAL:
            raise GEOSException('GDAL required to convert to an OGRGeometry.')
        if self.srid:
            try:
                return gdal.OGRGeometry(self.wkb, self.srid)
            except gdal.SRSException:
                # Unknown SRID: fall through and build without a spatial ref.
                pass
        return gdal.OGRGeometry(self.wkb)

    @property
    def srs(self):
        "Returns the OSR SpatialReference for SRID of this Geometry."
        if not gdal.HAS_GDAL:
            raise GEOSException('GDAL required to return a SpatialReference object.')
        if self.srid:
            try:
                return gdal.SpatialReference(self.srid)
            except gdal.SRSException:
                # SRID not resolvable to a spatial reference; return None below.
                pass
        return None

    @property
    def crs(self):
        "Alias for `srs` property."
        return self.srs
    def transform(self, ct, clone=False):
        """
        Requires GDAL. Transforms the geometry according to the given
        transformation object, which may be an integer SRID, and WKT or
        PROJ.4 string. By default, the geometry is transformed in-place and
        nothing is returned. However if the `clone` keyword is set, then this
        geometry will not be modified and a transformed clone will be returned
        instead.
        """
        srid = self.srid

        if ct == srid:
            # short-circuit where source & dest SRIDs match
            if clone:
                return self.clone()
            else:
                return

        if (srid is None) or (srid < 0):
            raise GEOSException("Calling transform() with no SRID set is not supported")

        if not gdal.HAS_GDAL:
            raise GEOSException("GDAL library is not available to transform() geometry.")

        # Creating an OGR Geometry, which is then transformed (GDAL does the
        # actual coordinate transformation work).
        g = self.ogr
        g.transform(ct)
        # Getting a new GEOS pointer by round-tripping through WKB.
        ptr = wkb_r().read(g.wkb)
        if clone:
            # User wants a cloned transformed geometry returned.
            return GEOSGeometry(ptr, srid=g.srid)
        if ptr:
            # Reassigning pointer, and performing post-initialization setup
            # again due to the reassignment. The old GEOS geometry must be
            # destroyed first to avoid leaking it.
            capi.destroy_geom(self.ptr)
            self.ptr = ptr
            self._post_init(g.srid)
        else:
            raise GEOSException('Transformed WKB was invalid.')
    # #### Topology Routines ####
    def _topology(self, gptr):
        "Helper routine to return Geometry from the given pointer."
        # The result inherits this geometry's SRID.
        return GEOSGeometry(gptr, srid=self.srid)

    @property
    def boundary(self):
        "Returns the boundary as a newly allocated Geometry object."
        return self._topology(capi.geos_boundary(self.ptr))

    def buffer(self, width, quadsegs=8):
        """
        Returns a geometry that represents all points whose distance from this
        Geometry is less than or equal to distance. Calculations are in the
        Spatial Reference System of this Geometry. The optional third parameter sets
        the number of segment used to approximate a quarter circle (defaults to 8).
        (Text from PostGIS documentation at ch. 6.1.3)
        """
        return self._topology(capi.geos_buffer(self.ptr, width, quadsegs))

    @property
    def centroid(self):
        """
        The centroid is equal to the centroid of the set of component Geometries
        of highest dimension (since the lower-dimension geometries contribute zero
        "weight" to the centroid).
        """
        return self._topology(capi.geos_centroid(self.ptr))

    @property
    def convex_hull(self):
        """
        Returns the smallest convex Polygon that contains all the points
        in the Geometry.
        """
        return self._topology(capi.geos_convexhull(self.ptr))

    def difference(self, other):
        """
        Returns a Geometry representing the points making up this Geometry
        that do not make up other.
        """
        return self._topology(capi.geos_difference(self.ptr, other.ptr))

    @property
    def envelope(self):
        "Return the envelope for this geometry (a polygon)."
        return self._topology(capi.geos_envelope(self.ptr))

    def intersection(self, other):
        "Returns a Geometry representing the points shared by this Geometry and other."
        return self._topology(capi.geos_intersection(self.ptr, other.ptr))

    @property
    def point_on_surface(self):
        "Computes an interior point of this Geometry."
        return self._topology(capi.geos_pointonsurface(self.ptr))

    def relate(self, other):
        "Returns the DE-9IM intersection matrix for this Geometry and the other."
        return capi.geos_relate(self.ptr, other.ptr).decode()

    def simplify(self, tolerance=0.0, preserve_topology=False):
        """
        Returns the Geometry, simplified using the Douglas-Peucker algorithm
        to the specified tolerance (higher tolerance => less points). If no
        tolerance provided, defaults to 0.

        By default, this function does not preserve topology - e.g. polygons can
        be split, collapse to lines or disappear holes can be created or
        disappear, and lines can cross. By specifying preserve_topology=True,
        the result will have the same dimension and number of components as the
        input. This is significantly slower.
        """
        if preserve_topology:
            return self._topology(capi.geos_preservesimplify(self.ptr, tolerance))
        else:
            return self._topology(capi.geos_simplify(self.ptr, tolerance))

    def sym_difference(self, other):
        """
        Returns a set combining the points in this Geometry not in other,
        and the points in other not in this Geometry.
        """
        return self._topology(capi.geos_symdifference(self.ptr, other.ptr))

    def union(self, other):
        "Returns a Geometry representing all the points in this Geometry and other."
        return self._topology(capi.geos_union(self.ptr, other.ptr))
    # #### Other Routines ####
    @property
    def area(self):
        "Returns the area of the Geometry."
        # The GEOS C call writes into the c_double out-parameter; the ctypes
        # wrapper is assumed to return the computed value directly.
        return capi.geos_area(self.ptr, byref(c_double()))

    def distance(self, other):
        """
        Returns the distance between the closest points on this Geometry
        and the other. Units will be in those of the coordinate system of
        the Geometry.
        """
        if not isinstance(other, GEOSGeometry):
            raise TypeError('distance() works only on other GEOS Geometries.')
        return capi.geos_distance(self.ptr, other.ptr, byref(c_double()))
@property
def extent(self):
"""
Returns the extent of this geometry as a 4-tuple, consisting of
(xmin, ymin, xmax, ymax).
"""
from .point import Point
env = self.envelope
if isinstance(env, Point):
xmin, ymin = env.tuple
xmax, ymax = xmin, ymin
else:
xmin, ymin = env[0][0]
xmax, ymax = env[0][2]
return (xmin, ymin, xmax, ymax)
    @property
    def length(self):
        """
        Returns the length of this Geometry (e.g., 0 for point, or the
        circumference of a Polygon).
        """
        return capi.geos_length(self.ptr, byref(c_double()))

    def clone(self):
        "Clones this Geometry, returning a new GEOSGeometry with the same SRID."
        return GEOSGeometry(capi.geom_clone(self.ptr), srid=self.srid)
class ProjectInterpolateMixin(object):
    """
    Used for LineString and MultiLineString, which support the GEOS
    linear-referencing operations (interpolate/project).
    """

    def _require_point(self, value):
        """
        Validate that `value` is a Point, raising TypeError otherwise.

        FIX: the previous error message ('locate_point argument must be a
        Point') referred to a parameter name that does not exist on either
        caller; it also duplicated the same check in two methods.
        """
        from .point import Point
        if not isinstance(value, Point):
            raise TypeError('point argument must be a Point')

    def interpolate(self, distance):
        "Return the Point located `distance` units along this geometry."
        return self._topology(capi.geos_interpolate(self.ptr, distance))

    def interpolate_normalized(self, distance):
        "Like interpolate(), with `distance` as a fraction of total length."
        return self._topology(capi.geos_interpolate_normalized(self.ptr, distance))

    def project(self, point):
        """
        Return the distance along this geometry to the point on it that is
        closest to the given Point. Raises TypeError for non-Point input.
        """
        self._require_point(point)
        return capi.geos_project(self.ptr, point.ptr)

    def project_normalized(self, point):
        "Like project(), expressed as a fraction of the geometry's length."
        self._require_point(point)
        return capi.geos_project_normalized(self.ptr, point.ptr)
| bsd-3-clause |
kjenagan/support-tools | wiki_to_md/impl/constants.py | 151 | 6658 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants used during conversion."""
import re
# These are the various different matching possibilities Google Code
# recognizes. As matches are made, the respective handler class method is
# is called, which can do what it wishes with the match.
# The pragmas:
PRAGMA_NAMES = ["summary", "labels", "sidebar"]
PRAGMA_RE = re.compile(r"^#(" + "|".join(PRAGMA_NAMES) + r")(.*)$")
# Whitespace:
WHITESPACE_RE = re.compile(r"\s+")
INDENT_RE = re.compile(r"\A\s*")
# Code blocks:
START_CODEBLOCK_RE = re.compile(r"^{{{$")
END_CODEBLOCK_RE = re.compile(r"^}}}$")
# Line rules. These rules consume an entire line:
LINE_FORMAT_RULES = [
r"""(?P<HRule>
^
----+
$
)""",
r"""(?P<Heading>
^
=+\s* # Matches the leading delimiter
.* # Matches the heading title text
\s*=+\s* # Matches the trailing delimiter
$
)""",
]
LINE_FORMAT_RE = re.compile("(?x)" + "|".join(LINE_FORMAT_RULES), re.UNICODE)
# General formatting rules:
SIMPLE_FORMAT_RULE = r"""
(?P<{0}>
(?:
(?<=\W|_) # Match only if preceded by an authorized delimiter
{1} # The opening format character
) |
(?:
{1} # Or match the closing format character...
(?=\W|_) # But only if followed by an authorized delimiter
) |
(?:
^ # Or match the format character at the start of a line...
{1}
) |
(?:
{1} # Or at the end of a line.
$
)
)
"""
URL_SCHEMA_RULE = r"(https?|ftp|nntp|news|mailto|telnet|file|irc)"
OPTIONAL_DESC_RULE = r"(?:\s+[^]]+)?"
VALID_PAGENAME = r"(([A-Za-z0-9][A-Za-z0-9_]*)?[A-Za-z0-9])"
# Link anchors use the Fragment ID pattern from RFC 1630.
# Dropping the quotes for security considerations.
XALPHA_RULE = r"[A-Za-z0-9%$-_@.&!*\(\),]"
# Only WikiWords matching this pattern are detected and autolinked in the text.
WIKIWORD_AUTOLINK_RULE = (
r"(?:[A-Z][a-z0-9]+_*)+(?:[A-Z][a-z0-9]+)(?:[#]{0}*?)?".format(XALPHA_RULE))
WIKIWORD_RULE = r"(?:{0}?(?:[#]{1}*?)?)".format(VALID_PAGENAME, XALPHA_RULE)
# "Plugins" are anything that looks like an XML/HTML tag.
PLUGIN_NAME = r"[a-zA-Z0-9_\-]+" # Matches a plugin name.
PLUGIN_ID = r"({0}:)?{0}".format(PLUGIN_NAME) # Matches a namespace and name.
PLUGIN_PARAM = r"""({0})\s*=\s*("[^"]*"|'[^']*'|\S+)""".format(PLUGIN_NAME)
PLUGIN = r"<{0}(?:\s+{1})*\s*/?>".format(PLUGIN_ID, PLUGIN_PARAM)
PLUGIN_END = r"</{0}>".format(PLUGIN_ID)
PLUGIN_ID_RE = re.compile(PLUGIN_ID, re.UNICODE)
PLUGIN_PARAM_RE = re.compile(PLUGIN_PARAM, re.UNICODE)
PLUGIN_RE = re.compile(PLUGIN, re.UNICODE)
PLUGIN_END_RE = re.compile(PLUGIN_END, re.UNICODE)
TEXT_FORMAT_RULES = [
SIMPLE_FORMAT_RULE.format("Bold", r"\*"),
SIMPLE_FORMAT_RULE.format("Italic", "_"),
SIMPLE_FORMAT_RULE.format("Strikethrough", "~~"),
r"\^(?P<Superscript>.+?)\^",
r",,(?P<Subscript>.+?),,",
r"`(?P<InlineCode>.+?)`",
r"\{\{\{(?P<InlineCode2>.+?)\}\}\}",
r"""# Matches an entire table cell
(?P<TableCell>
(?:\|\|)+ # Any number of start markers, to support rowspan
.*? # Text of the table cell
(?=\|\|) # Assertion that we have a table cell end
)""",
r"(?P<TableRowEnd>\|\|\s*$)",
r"""# Matches a freestanding URL in the source text.
(?P<Url>
\b(?:{0}://|(mailto:)) # Matches supported URL schemas
[^\s'\"<]+ # Match at least one character that is
# authorized within a URL.
[^\s'\"<.,}})\]]+ # After that, match all the way up to the first
# character that looks like a terminator.
)""".format(URL_SCHEMA_RULE),
r"""# Matches bracketed URLs: [http://foo.bar An optional description]
(?P<UrlBracket>
\[
(?:{0}://|(mailto:)) # Matches supported URL schemas
[^]\s]+ # Matches up to the closing bracket or whitespace
{1} # Matches the optional URL description
\]
)""".format(URL_SCHEMA_RULE, OPTIONAL_DESC_RULE),
r"""# Matches a WikiWord embedded in the text.
(?:
(?<![A-Za-z0-9\[]) # Matches the WikiWord only if it's not preceded
# by an alphanumeric character or a bracket.
(?P<WikiWord>
!? # The WikiWord is preceded by an optional exclamation
# mark, which makes it not a link. However, we still
# need to match it as being a link, so that we can strip
# the exclamation mark from the resulting plaintext WikiWord.
{0} # The WikiWord itself
)
(?![A-Za-z0-9]) # Matches the WikiWord only if it's not followed
# by alphanumeric characters.
)""".format(WIKIWORD_AUTOLINK_RULE),
r"""# Matches a forced/named WikiLink: [WikiWord an optional description]
(?P<WikiWordBracket>
\[
{0} # Matches the WikiWord
{1} # Matches the optional WikiLink description
\]
)""".format(WIKIWORD_RULE, OPTIONAL_DESC_RULE),
r"""# Matches an issue reference.
(?P<IssueLink>
(
\b([Ii][Ss][Ss][Uu][Ee]|[Bb][Uu][Gg])\s*\#?
)
\d+\b
)
""",
r"""# Matches a revision reference.
(?P<RevisionLink>
(
\b[Rr]([Ee][Vv][Ii][Ss][Ii][Oo][Nn]\s*\#?)?
)
\d+\b
)
""",
r"(?P<Plugin>{0})".format(PLUGIN),
r"(?P<PluginEnd>{0})".format(PLUGIN_END),
r"""# Matches a variable being used, defined in a plugin or globally.
%%(?P<Variable>[\w|_|\-]+)%%"""
]
TEXT_FORMAT_RE = re.compile("(?x)" + "|".join(TEXT_FORMAT_RULES), re.UNICODE)
# For verification of YouTube video IDs.
YOUTUBE_VIDEO_ID_RE = re.compile("^[a-zA-Z0-9_-]+$")

# List types: maps the wiki list-marker character to the list kind.
LIST_TYPES = {
    "1": "numeric",
    "#": "numeric",
    "*": "bullet",
    " ": "blockquote",
}
| apache-2.0 |
njmube/erpnext | erpnext/accounts/report/gross_profit/gross_profit.py | 2 | 11352 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, scrub
from erpnext.stock.utils import get_incoming_rate
from frappe.utils import flt
def execute(filters=None):
    """
    Report entry point for the Gross Profit report.

    Returns a (columns, data) pair: the column definitions for the selected
    group_by filter and one report row per (possibly aggregated) invoice item,
    with the company currency appended to every row.
    """
    if not filters:
        filters = frappe._dict()

    company_currency = frappe.db.get_value("Company", filters.company, "default_currency")

    gross_profit_data = GrossProfitGenerator(filters)

    data = []
    # Use the aggregated rows unless the report is run invoice-wise.
    source = gross_profit_data.grouped_data if filters.get("group_by") != "Invoice" else gross_profit_data.data

    # Maps each group_by option to the columns it displays.
    # FIX: the dict literal previously defined "territory" twice; only the
    # later (summary) definition ever took effect, so the dead duplicate has
    # been removed and the effective definition kept.
    group_wise_columns = frappe._dict({
        "invoice": ["parent", "customer", "customer_group", "posting_date", "item_code", "item_name", "item_group", "brand", "description",
            "warehouse", "qty", "base_rate", "buying_rate", "base_amount",
            "buying_amount", "gross_profit", "gross_profit_percent", "project"],
        "item_code": ["item_code", "item_name", "brand", "description", "qty", "base_rate",
            "buying_rate", "base_amount", "buying_amount", "gross_profit", "gross_profit_percent"],
        "warehouse": ["warehouse", "qty", "base_rate", "buying_rate", "base_amount", "buying_amount",
            "gross_profit", "gross_profit_percent"],
        "brand": ["brand", "qty", "base_rate", "buying_rate", "base_amount", "buying_amount",
            "gross_profit", "gross_profit_percent"],
        "item_group": ["item_group", "qty", "base_rate", "buying_rate", "base_amount", "buying_amount",
            "gross_profit", "gross_profit_percent"],
        "customer": ["customer", "customer_group", "qty", "base_rate", "buying_rate", "base_amount", "buying_amount",
            "gross_profit", "gross_profit_percent"],
        "customer_group": ["customer_group", "qty", "base_rate", "buying_rate", "base_amount", "buying_amount",
            "gross_profit", "gross_profit_percent"],
        "sales_person": ["sales_person", "allocated_amount", "qty", "base_rate", "buying_rate", "base_amount", "buying_amount",
            "gross_profit", "gross_profit_percent"],
        "project": ["project", "base_amount", "buying_amount", "gross_profit", "gross_profit_percent"],
        "territory": ["territory", "base_amount", "buying_amount", "gross_profit", "gross_profit_percent"]
    })

    columns = get_columns(group_wise_columns, filters)

    for src in source:
        # Build the row in the column order defined for this grouping.
        row = []
        for col in group_wise_columns.get(scrub(filters.group_by)):
            row.append(src.get(col))

        row.append(company_currency)
        data.append(row)

    return columns, data
def get_columns(group_wise_columns, filters):
    """
    Build the report column definitions for the selected group_by filter,
    always appending a trailing Currency column.
    """
    column_map = frappe._dict({
        "parent": _("Sales Invoice") + ":Link/Sales Invoice:120",
        "posting_date": _("Posting Date") + ":Date",
        "posting_time": _("Posting Time"),
        "item_code": _("Item Code") + ":Link/Item",
        "item_name": _("Item Name"),
        "item_group": _("Item Group") + ":Link/Item Group",
        "brand": _("Brand"),
        "description": _("Description"),
        "warehouse": _("Warehouse") + ":Link/Warehouse",
        "qty": _("Qty") + ":Float",
        "base_rate": _("Avg. Selling Rate") + ":Currency/currency",
        "buying_rate": _("Avg. Buying Rate") + ":Currency/currency",
        "base_amount": _("Selling Amount") + ":Currency/currency",
        "buying_amount": _("Buying Amount") + ":Currency/currency",
        "gross_profit": _("Gross Profit") + ":Currency/currency",
        "gross_profit_percent": _("Gross Profit %") + ":Percent",
        "project": _("Project") + ":Link/Project",
        "sales_person": _("Sales person"),
        "allocated_amount": _("Allocated Amount") + ":Currency/currency",
        "customer": _("Customer") + ":Link/Customer",
        "customer_group": _("Customer Group") + ":Link/Customer Group",
        "territory": _("Territory") + ":Link/Territory"
    })

    # Translate the field list for this grouping into column definitions.
    selected_fields = group_wise_columns.get(scrub(filters.group_by))
    columns = [column_map.get(field) for field in selected_fields]

    columns.append({
        "fieldname": "currency",
        "label": _("Currency"),
        "fieldtype": "Link",
        "options": "Currency"
    })

    return columns
class GrossProfitGenerator(object):
    """
    Computes buying amount, selling amount and gross profit for submitted
    Sales Invoice items, optionally aggregated by the report's group_by
    filter (item, warehouse, customer, sales person, etc.).

    After construction, results are available on `self.data` (row-wise)
    and `self.grouped_data` (aggregated), consumed by execute().
    """

    def __init__(self, filters=None):
        self.data = []
        # Cache of per-item average buying rates (see get_average_buying_rate).
        self.average_buying_rate = {}
        self.filters = frappe._dict(filters)

        # Pre-load all the data the computation needs.
        self.load_invoice_items()
        self.load_stock_ledger_entries()
        self.load_product_bundle()
        self.load_non_stock_items()

        self.process()

    def process(self):
        """Compute buying amount, rates and gross profit for every row."""
        self.grouped = {}
        for row in self.si_list:
            if self.skip_row(row, self.product_bundles):
                continue

            row.base_amount = flt(row.base_net_amount)

            # Resolve the packed-item (product bundle) rows attached to this
            # invoice/delivery-note line, if any.
            product_bundles = []
            if row.update_stock:
                product_bundles = self.product_bundles.get(row.parenttype, {}).get(row.parent, frappe._dict())
            elif row.dn_detail:
                product_bundles = self.product_bundles.get("Delivery Note", {})\
                    .get(row.delivery_note, frappe._dict())
                # Valuation entries reference the Delivery Note item row.
                row.item_row = row.dn_detail

            # get buying amount
            if row.item_code in product_bundles:
                row.buying_amount = self.get_buying_amount_from_product_bundle(row,
                    product_bundles[row.item_code])
            else:
                row.buying_amount = self.get_buying_amount(row, row.item_code)

            # get buying rate (guard against division by zero qty)
            if row.qty:
                row.buying_rate = row.buying_amount / row.qty
                row.base_rate = row.base_amount / row.qty
            else:
                row.buying_rate, row.base_rate = 0.0, 0.0

            # calculate gross profit
            row.gross_profit = row.base_amount - row.buying_amount
            if row.base_amount:
                row.gross_profit_percent = (row.gross_profit / row.base_amount) * 100.0
            else:
                row.gross_profit_percent = 0.0

            # add to grouped (keyed by the group_by field's value)
            if self.filters.group_by != "Invoice":
                self.grouped.setdefault(row.get(scrub(self.filters.group_by)), []).append(row)

            self.data.append(row)

        if self.grouped:
            self.get_average_rate_based_on_group_by()
        else:
            self.grouped_data = []

    def get_average_rate_based_on_group_by(self):
        """Collapse each group into one row, summing amounts and re-deriving rates."""
        # sum buying / selling totals for group
        self.grouped_data = []
        for key in self.grouped.keys():
            for i, row in enumerate(self.grouped[key]):
                if i == 0:
                    # First row of the group becomes the accumulator.
                    new_row = row
                else:
                    new_row.qty += row.qty
                    new_row.buying_amount += row.buying_amount
                    new_row.base_amount += row.base_amount
            # Re-derive the group-level profit and averages from the sums.
            new_row.gross_profit = new_row.base_amount - new_row.buying_amount
            new_row.gross_profit_percent = ((new_row.gross_profit / new_row.base_amount) * 100.0) \
                if new_row.base_amount else 0
            new_row.buying_rate = (new_row.buying_amount / new_row.qty) \
                if new_row.qty else 0
            new_row.base_rate = (new_row.base_amount / new_row.qty) \
                if new_row.qty else 0
            self.grouped_data.append(new_row)

    def skip_row(self, row, product_bundles):
        """Return True if the row lacks a value for the group_by field.

        NOTE(review): the `product_bundles` argument is unused here; returns
        None (falsy) rather than False when the row should be kept.
        """
        if self.filters.get("group_by") != "Invoice" and not row.get(scrub(self.filters.get("group_by"))):
            return True

    def get_buying_amount_from_product_bundle(self, row, product_bundle):
        """Sum the buying amounts of the bundle's packed items for this row."""
        buying_amount = 0.0
        for packed_item in product_bundle:
            if packed_item.get("parent_detail_docname") == row.item_row:
                buying_amount += self.get_buying_amount(row, packed_item.item_code)

        return buying_amount

    def get_buying_amount(self, row, item_code):
        """Determine the buying (valuation) amount for one invoice item."""
        # IMP NOTE
        # stock_ledger_entries should already be filtered by item_code and warehouse and
        # sorted by posting_date desc, posting_time desc
        if item_code in self.non_stock_items:
            # Issue 6089 - Get last purchasing rate for non-stock item
            item_rate = self.get_last_purchase_rate(item_code)
            return flt(row.qty) * item_rate
        else:
            my_sle = self.sle.get((item_code, row.warehouse))
            if (row.update_stock or row.dn_detail) and my_sle:
                parenttype, parent = row.parenttype, row.parent
                if row.dn_detail:
                    parenttype, parent = "Delivery Note", row.delivery_note

                for i, sle in enumerate(my_sle):
                    # find the stock valution rate from stock ledger entry
                    if sle.voucher_type == parenttype and parent == sle.voucher_no and \
                        sle.voucher_detail_no == row.item_row:
                        # Valuation = drop in stock value caused by this entry
                        # (entries are sorted newest-first, so i+1 is the
                        # immediately preceding state).
                        previous_stock_value = len(my_sle) > i+1 and \
                            flt(my_sle[i+1].stock_value) or 0.0
                        return previous_stock_value - flt(sle.stock_value)
            else:
                # No matching ledger entry: fall back to an average buying rate.
                return flt(row.qty) * self.get_average_buying_rate(row, item_code)

        return 0.0

    def get_average_buying_rate(self, row, item_code):
        """Return (and cache) the average buying rate for `item_code`."""
        if not item_code in self.average_buying_rate:
            if item_code in self.non_stock_items:
                # Average over all submitted Purchase Invoice rows.
                self.average_buying_rate[item_code] = flt(frappe.db.sql("""select sum(base_net_amount) / sum(qty * conversion_factor)
                    from `tabPurchase Invoice Item`
                    where item_code = %s and docstatus=1""", item_code)[0][0])
            else:
                self.average_buying_rate[item_code] = get_incoming_rate(row)

        return self.average_buying_rate[item_code]

    def get_last_purchase_rate(self, item_code):
        """Most recent submitted purchase rate for `item_code` (0.0 if none)."""
        if self.filters.to_date:
            # Respect the report's end date when one is given.
            last_purchase_rate = frappe.db.sql("""
                select (a.base_rate / a.conversion_factor)
                from `tabPurchase Invoice Item` a
                where a.item_code = %s and a.docstatus=1
                and modified <= %s
                order by a.modified desc limit 1""", (item_code, self.filters.to_date))
        else:
            last_purchase_rate = frappe.db.sql("""
                select (a.base_rate / a.conversion_factor)
                from `tabPurchase Invoice Item` a
                where a.item_code = %s and a.docstatus=1
                order by a.modified desc limit 1""", item_code)
        return flt(last_purchase_rate[0][0]) if last_purchase_rate else 0

    def load_invoice_items(self):
        """Load all submitted, non-return Sales Invoice items in the period."""
        conditions = ""
        if self.filters.company:
            conditions += " and company = %(company)s"
        if self.filters.from_date:
            conditions += " and posting_date >= %(from_date)s"
        if self.filters.to_date:
            conditions += " and posting_date <= %(to_date)s"

        self.si_list = frappe.db.sql("""select item.parenttype, item.parent,
            si.posting_date, si.posting_time, si.project, si.update_stock,
            si.customer, si.customer_group, si.territory,
            item.item_code, item.item_name, item.description, item.warehouse,
            item.item_group, item.brand, item.dn_detail, item.delivery_note,
            item.qty, item.base_net_rate, item.base_net_amount, item.name as "item_row",
            sales.sales_person, sales.allocated_amount, sales.incentives
            from `tabSales Invoice` si
            inner join `tabSales Invoice Item` item on item.parent = si.name
            left join `tabSales Team` sales on sales.parent = si.name
            where
            si.docstatus = 1 and si.is_return != 1 %s
            order by
            si.posting_date desc, si.posting_time desc""" % (conditions,), self.filters, as_dict=1)

    def load_stock_ledger_entries(self):
        """Load stock ledger entries keyed by (item_code, warehouse),
        newest first (get_buying_amount relies on this ordering)."""
        res = frappe.db.sql("""select item_code, voucher_type, voucher_no,
            voucher_detail_no, stock_value, warehouse, actual_qty as qty
            from `tabStock Ledger Entry`
            where company=%(company)s
            order by
            item_code desc, warehouse desc, posting_date desc,
            posting_time desc, name desc""", self.filters, as_dict=True)
        self.sle = {}
        for r in res:
            if (r.item_code, r.warehouse) not in self.sle:
                self.sle[(r.item_code, r.warehouse)] = []

            self.sle[(r.item_code, r.warehouse)].append(r)

    def load_product_bundle(self):
        """Index submitted Packed Items by parenttype -> parent -> parent_item."""
        self.product_bundles = {}

        for d in frappe.db.sql("""select parenttype, parent, parent_item,
            item_code, warehouse, -1*qty as total_qty, parent_detail_docname
            from `tabPacked Item` where docstatus=1""", as_dict=True):
            self.product_bundles.setdefault(d.parenttype, frappe._dict()).setdefault(d.parent,
                frappe._dict()).setdefault(d.parent_item, []).append(d)

    def load_non_stock_items(self):
        """Cache the names of all non-stock items."""
        self.non_stock_items = frappe.db.sql_list("""select name from tabItem
            where is_stock_item=0""")
| agpl-3.0 |
bauruine/ansible | lib/ansible/utils/module_docs.py | 37 | 3751 | #!/usr/bin/env python
# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import os
import sys
import ast
import yaml
import traceback
from ansible import utils
# Modules that are allowed to lack documentation strings; get_docstring()
# callers can use this to suppress warnings for them.
BLACKLIST_MODULES = [
    'async_wrapper', 'accelerate', 'async_status'
]
def get_docstring(filename, verbose=False):
    """
    Search for assignment of the DOCUMENTATION and EXAMPLES variables
    in the given file.
    Parse DOCUMENTATION from YAML and return the YAML doc or None
    together with EXAMPLES, as plain text.

    DOCUMENTATION can be extended using documentation fragments
    loaded by the PluginLoader from the module_docs_fragments
    directory.

    FIXES: the file handle from open() was never closed; dict.has_key()
    (removed in py3) replaced with `in`; bare `except:` narrowed to
    `except Exception:`; `verbose == True` simplified; duplicate
    traceback print removed; print used in function-call form.
    """

    doc = None
    plainexamples = None

    try:
        # Parse the module source into an AST so the DOCUMENTATION/EXAMPLES
        # assignments can be found without importing (executing) the module.
        with open(filename) as f:
            M = ast.parse(f.read())
        for child in M.body:
            if isinstance(child, ast.Assign):
                if 'DOCUMENTATION' in (t.id for t in child.targets):
                    doc = yaml.safe_load(child.value.s)
                    fragment_slug = doc.get('extends_documentation_fragment',
                                            'doesnotexist').lower()

                    # Allow the module to specify a var other than DOCUMENTATION
                    # to pull the fragment from, using dot notation as a separator
                    if '.' in fragment_slug:
                        fragment_name, fragment_var = fragment_slug.split('.', 1)
                        fragment_var = fragment_var.upper()
                    else:
                        fragment_name, fragment_var = fragment_slug, 'DOCUMENTATION'

                    if fragment_slug != 'doesnotexist':
                        fragment_class = utils.plugins.fragment_loader.get(fragment_name)
                        assert fragment_class is not None

                        fragment_yaml = getattr(fragment_class, fragment_var, '{}')
                        fragment = yaml.safe_load(fragment_yaml)

                        # Merge fragment notes into the doc's notes list.
                        if 'notes' in fragment:
                            notes = fragment.pop('notes')
                            if notes:
                                if 'notes' not in doc:
                                    doc['notes'] = []
                                doc['notes'].extend(notes)

                        if 'options' not in fragment:
                            raise Exception("missing options in fragment, possibly misformatted?")

                        # Merge remaining fragment keys; existing dict values
                        # are updated rather than replaced.
                        for key, value in fragment.items():
                            if key not in doc:
                                doc[key] = value
                            else:
                                doc[key].update(value)

                if 'EXAMPLES' in (t.id for t in child.targets):
                    plainexamples = child.value.s[1:]  # Skip first empty line

    except Exception:
        # Parsing or fragment merging can fail in many ways (unreadable file,
        # bad YAML, missing fragment); report and return what we have so far.
        traceback.print_exc()
        if verbose:
            print("unable to parse %s" % filename)
    return doc, plainexamples
| gpl-3.0 |
christabor/csscms | csscms/parser.py | 1 | 18081 | from tinycss.page3 import CSSPage3Parser
import css_properties
from css_options import css_opts
from validations import ValidationHelpersMixin
DEBUG = True if __name__ == '__main__' else False
class MissingTokenType(Exception):
    """Raised when a token type has no entry in the token types config.

    FIX: the original constructor printed the explanation to stdout and
    attached no message to the exception, so str(exc) was empty and nothing
    useful appeared in tracebacks or logs. The message now travels with the
    exception itself.
    """

    def __init__(self):
        message = ('Invalid token type: please add a '
                   'new one to the token types config.')
        super(MissingTokenType, self).__init__(message)
class MissingAtKeywordType(Exception):
    """Raised when an @ keyword is not one of the supported at-rule types.

    FIX: the original wrote ``'...{}'.format(' ').join(css_opts['at_types'])``,
    which applies ``.join`` to the already-formatted string -- i.e. it joined
    the at-types using the entire message as the separator, producing a
    garbled message (and still only printing it instead of attaching it to
    the exception). The intent was to join the valid types with spaces
    inside the message.
    """

    def __init__(self):
        message = 'Invalid @ keyword type. Options are: {}'.format(
            ' '.join(css_opts['at_types']))
        super(MissingAtKeywordType, self).__init__(message)
class InputBuilder(ValidationHelpersMixin, CSSPage3Parser):
"""
Convention: all public methods return `self` to allow for chaining.
TODO: docs, docstrings
"""
def __init__(
self, data, unwanted_props=[],
css_input_wrapper_class='css-func',
custom_input_html=None, show_empty=False, use_bytes=False):
self.use_value = True
self._generated_data = None
self.css_input_wrapper_class = css_input_wrapper_class
self.unwanted_props = unwanted_props
self.show_empty_declarations = show_empty
self.custom_input_html = custom_input_html
if use_bytes:
self.stylesheet = self.parse_stylesheet_bytes(data)
else:
self.stylesheet = self.parse_stylesheet_file(data)
self.animation_group_html = ('<div class="animation-group">'
'{percentages}</div>')
self.surrounding_html = '<div class="{}">{}</div>'
self.container_html = ('<div class="selector-group">\n'
'<span class="selector-label">'
'{selector}</span> {}\n{code}{}</div>\n')
self.default_input_html = ('<label>\n<em>{name}:</em>'
'\n{input_html}\n</label>\n')
def parse_media(self, tokens):
"""Private method overridden from tinycss."""
mediaquery_tokens = [f for f in tokens if f.type == 'IDENT']
return mediaquery_tokens
    def parse_at_rule(self, rule, previous_rules, errors, context):
        """Inject a custom property for filtering purposes.
        This method overrides the private tinycss method."""
        if rule.at_keyword == '@keyframes':
            # Tag keyframe rules so later passes can identify them cheaply.
            rule.keyframes = True
            return rule
        # Parse the rest normally
        return super(InputBuilder, self).parse_at_rule(
            rule, previous_rules, errors, context)
def _strip_quotes(self, val):
"""Normalize properties with beginning or
trailing quotations, like `content: ""`
"""
if type(val) != 'str':
return val
if val.startswith('"'):
val = val[1:]
if val.endswith('"'):
val = val[:-1]
return val
def _convert_odd_types(self, value):
try:
return css_opts['odd_props'][value]
except KeyError:
return None
def _get_dropdown_html(self, props, name='', token=None):
"""Takes name and value, then builds
matching select > option html"""
# Accompanying input html required for some
# non-dropdown complementary fields
non_dropdown_html = ''
dropdown_html = '<select name="{}">'.format(name)
for prop in props:
# One off cases where some value should be represented
# by a different field type
if prop in css_opts['odd_props']:
new_token_type = self._convert_odd_types(prop)
if new_token_type is not None:
non_dropdown_html += self._get_input_html(
None,
new_token_type, prop, prop)
else:
# Build the /actual/ option html.
dropdown_html += self._get_input_html(
None,
'OPTION', prop, prop, selected='')
dropdown_html += '</select>'
return (non_dropdown_html + (
'<em class="or-divider">or</em>'
if non_dropdown_html else '') + dropdown_html)
def _is_cruft(self, token_type):
if token_type in ['S', 'DELIM']:
return True
return False
def _get_input_html(self, selector, token_type, name, value, **kwargs):
if self._is_cruft(token_type):
return ''
value = self._strip_quotes(value)
try:
# Plain ol' direct mapping
return css_opts['types'][token_type].format(
name='{}[{}]'.format(selector, name), placeholder=value,
value=value if self.use_value else '', **kwargs)
except KeyError:
raise MissingTokenType
def _wrap_input_html(self, **kwargs):
"""Wraps form field grouping with surrounding html"""
# Allow arbitrary custom html, so long as the kwargs
# match up the format kwargs -- otherwise error will be thrown.
wrapper = (self.custom_input_html if self.custom_input_html
else self.default_input_html)
html = wrapper.format(**kwargs)
return self.surrounding_html.format(self.css_input_wrapper_class, html)
def _get_new_type(self, css):
if self._is_hex(css):
return 'HASH'
elif self._is_percentage(css):
return 'PERCENTAGE'
elif self._is_float(css):
return 'FLOAT'
elif self._is_int(css):
return 'INTEGER'
else:
return 'IDENT'
def _get_token_value(self, token):
try:
token_value = token.value
except AttributeError:
token_value = token.function_name
return token_value
def _get_form_html_data(
self, selector, token, prop_name, priority=None, shorthand=False):
"""Generates form html to be used by html builder"""
if self._is_cruft(token.type):
return ''
token_value = self._get_token_value(token)
# Normalize single vs multiple valued declarations
try:
prop_key = css_properties.rules[prop_name]
# Only overwrite string if it's not container type
if not shorthand and prop_key['dropdown']:
html = self._get_dropdown_html(
prop_key['values'], name=prop_name, token=token.type)
else:
html = self._get_input_html(
selector, token.type, prop_name, token_value)
except KeyError:
if DEBUG:
print('[ERROR] Property: "{}"'.format(prop_name))
# Try to recover gracefully with the appropriate type
_css = token.as_css()
new_type = self._get_new_type(_css)
html = self._get_input_html(
selector, new_type, prop_name, token_value)
if priority:
html += '<label>Important? {}</label>'.format(self._get_input_html(
selector, 'BOOLEAN', 'important', 'important',
checked='checked'))
return html
def _get_at_keyword_type(self, ruleset):
keys = ruleset.keys()
if 'uri' in keys or 'media' in keys:
return 'import'
if 'rules' in keys:
return 'media'
if 'keyframes' in keys:
return 'keyframes'
if 'at_keyword' in keys:
return ruleset['at_keyword'].replace('@', '')
raise MissingAtKeywordType
def _group_keyframe_tokens(self, tokens):
"""Groups a list of tokens from tinycss by brackets and contained css.
Since all tokens come in as one list, we need to group by individual
percentage declarations to allow differentiating between groups.
@keyframes myanimation {
10%, 20% {}
50, 60% {}
100% {}
}
"""
token_groups = {}
current_group = 0
for token in tokens:
# Skip some pieces that are unnecessary,
# like empty strings or commas/fragments
if token.as_css()[0] not in [',', ' ']:
try:
if token.as_css().startswith('{'):
token_groups[current_group]['rules'].append(token)
else:
token_groups[current_group]['percentages'].append(token)
except KeyError:
token_groups[current_group] = {
'percentages': [],
'rules': []
}
# Move to the next set of declarations
if token.as_css().startswith('{'):
current_group += 1
return token_groups
def _generate_keyframes_declarations(self, ruleset):
inputs = []
junk_types = ['DELIM', 'S', ':', ';']
token_groups = self._group_keyframe_tokens(ruleset.body)
for _, token_group in token_groups.iteritems():
for token in token_group['rules']:
percentages = ', '.join(
[t.as_css() for t in token_group['percentages']])
# All tokens are container tokens
if hasattr(token, 'is_container'):
# Parse container tokens
sub_tokens = [t for t in token.content if t.type
not in junk_types]
for key, sub_token in enumerate(sub_tokens):
if not self._is_valid_css_declaration(
sub_token.as_css()):
continue
if sub_token.type == 'FUNCTION':
function_tokens = [t for t in sub_token.content
if t.type not in junk_types]
for k, function_token in enumerate(function_tokens):
label = '{} ({})'.format(
sub_token.function_name, k)
name = '{}_{}'.format(
sub_token.function_name, k)
input_html = self._get_input_html(
label,
function_token.type, name,
function_token.as_css())
kwargs = {
'name': label,
'value': function_token.as_css(),
'input_html': input_html
}
inputs.append(self._wrap_input_html(**kwargs))
else:
html = ''
# Show, but don't create a field for
# percentage groupings
if sub_token.type == 'IDENT':
input_html = ''
else:
input_html = self._get_input_html(
label,
sub_token.type,
self._get_token_value(sub_token),
sub_token.as_css())
kwargs = {
'name': self._get_token_value(sub_token),
'value': sub_token.as_css(),
'input_html': input_html
}
# Only show the percentage label once per group
if key == 2:
group_html = self.animation_group_html.format(
percentages=percentages)
else:
group_html = ''
html += '{} {}'.format(
group_html, self._wrap_input_html(**kwargs))
inputs.append(html)
return inputs
def _generate_mediaquery_declarations(self, ruleset):
inputs = []
for rule in ruleset.rules:
sub_inputs = self._generate_regular_declarations(rule)
# Re-build parsed selector
selector = ''.join([s.value for s in rule.selector])
kwargs = {
'name': selector,
'input_html': ''.join(sub_inputs)
}
# Process all sub rules for every individual "parent" media rule.
inputs.append(self._wrap_input_html(**kwargs))
return inputs
def _generate_import_declarations(self, ruleset):
inputs, name = [], 'url ({})'.format(ruleset.uri)
input_html = self._get_input_html(name, 'URI', ('import-url'), name)
kwargs = {
'name': name,
'value': name,
'input_html': input_html
}
inputs.append(self._wrap_input_html(**kwargs))
return inputs
def _generate_regular_declarations(self, ruleset):
inputs = []
# All declarations in the selector
for declaration in ruleset.declarations:
# Property, e.g. background-color
prop_name = declaration.name
if self._is_valid_css_declaration(prop_name):
priority = declaration.priority
is_shorthand = prop_name in css_opts['shorthand']
# if is_shorthand and prop_name
html = ''
# Declaration tokens, e.g. "[2px, solid, #4444]"
for token in declaration.value:
if self._is_cruft(token.type):
continue
if hasattr(token, 'function_name'):
# Update prop_name to add function for more context
prop_name = '{} ({})'.format(
prop_name, token.function_name)
if not self._is_valid_css_declaration(
token.function_name):
continue
if token.function_name in css_opts['pseudo_shorthand']:
is_shorthand = True
if hasattr(token, 'content'):
for sub_token in token.content:
html += self._get_form_html_data(
ruleset.selector.as_css(),
sub_token, prop_name, priority=priority,
shorthand=is_shorthand)
else:
if is_shorthand:
# Note: shorthand properties are not grouped
# with appropriate dropdowns
# like single declarations, but rather, are
# converted to plain text inputs.
html += self._get_input_html(
ruleset.selector.as_css(),
token.type, token.unit,
self._get_token_value(token))
else:
html = self._get_form_html_data(
ruleset.selector.as_css(),
token, prop_name, priority=priority,
shorthand=is_shorthand)
# Add the final rendered html + labels, etc
# Only append properties that could be
# rendered as form fields
if html or self.show_empty_declarations:
inputs.append(
self._wrap_input_html(
**{'name': prop_name, 'input_html': html}))
return inputs
def _get_generator(self, ruleset, at_keyword=False):
if at_keyword:
# e.g. @import url('foo.css') projection, tv;
group_label = '@' + self._get_at_keyword_type(ruleset)
# Drill down further to determine the @ keyword type
try:
label_map = {
'@import': self._generate_import_declarations,
'@media': self._generate_mediaquery_declarations,
'@keyframes': self._generate_keyframes_declarations
}
active_func = label_map[group_label]
except KeyError:
raise MissingAtKeywordType
# Customize group label for keyframes
if group_label == '@keyframes':
group_label += ' ' + ruleset.head[0].as_css()
else:
# The group or single selector:
# .foo, .bar, .foo.bar {}
group_label = ruleset.selector.as_css()
active_func = self._generate_regular_declarations
return group_label, active_func
def generate(self):
"""Generates all html from the available stylesheet
reference exposed by init function."""
html_inputs = []
for ruleset in self.stylesheet.rules:
group_label = None
is_at_keyword = ruleset.at_keyword is not None
group_label, active_func = self._get_generator(
ruleset, at_keyword=is_at_keyword)
input_html = active_func(ruleset)
selector = ', <br>'.join(group_label.split(','))
code = ' '.join(input_html)
html_inputs.append(
self.container_html.format(
'{', '}', selector=selector, code=code))
# Join all data and populate global property
self._generated_data = ''.join(html_inputs)
return self
def save(self, filename):
if self._generated_data is None:
print('No data has been generated yet!')
return
with open(filename, 'w') as newfile:
newfile.write(self._generated_data)
newfile.write('\n')
newfile.close()
| mit |
geekaia/edx-platform | common/lib/xmodule/xmodule/modulestore/xml_importer.py | 2 | 34084 | import logging
import os
import mimetypes
from path import path
import json
from .xml import XMLModuleStore, ImportSystem, ParentTracker
from xblock.runtime import KvsFieldData, DictKeyValueStore
from xmodule.x_module import XModuleDescriptor
from opaque_keys.edx.keys import UsageKey
from xblock.fields import Scope, Reference, ReferenceList, ReferenceValueDict
from xmodule.contentstore.content import StaticContent
from .inheritance import own_metadata
from xmodule.errortracker import make_error_tracker
from .store_utilities import rewrite_nonportable_content_links
import xblock
from xmodule.tabs import CourseTabList
from xmodule.modulestore.exceptions import InvalidLocationError
log = logging.getLogger(__name__)
def import_static_content(
        course_data_path, static_content_store,
        target_course_id, subpath='static', verbose=False):
    """
    Walk `<course_data_path>/<subpath>` and save every file found into
    `static_content_store` under `target_course_id`.

    Returns a dict mapping each file's course-relative path to the asset
    key it was stored under, for later link rewriting by the caller.
    """
    remap_dict = {}
    # now import all static assets
    static_dir = course_data_path / subpath
    try:
        with open(course_data_path / 'policies/assets.json') as f:
            policy = json.load(f)
    except (IOError, ValueError) as err:
        # xml backed courses won't have this file, only exported courses;
        # so, its absence is not really an exception.
        policy = {}
        # NOTE(review): this silently overrides the caller's verbose=False
        # whenever assets.json is missing -- confirm that is intended.
        verbose = True
    mimetypes.add_type('application/octet-stream', '.sjson')
    mimetypes.add_type('application/octet-stream', '.srt')
    mimetypes_list = mimetypes.types_map.values()
    for dirname, _, filenames in os.walk(static_dir):
        for filename in filenames:
            content_path = os.path.join(dirname, filename)
            # Skip editor backup files ("foo~").
            if filename.endswith('~'):
                if verbose:
                    log.debug('skipping static content %s...', content_path)
                continue
            if verbose:
                log.debug('importing static content %s...', content_path)
            try:
                with open(content_path, 'rb') as f:
                    data = f.read()
            except IOError:
                if filename.startswith('._'):
                    # OS X "companion files". See
                    # http://www.diigo.com/annotated/0c936fda5da4aa1159c189cea227e174
                    continue
                # Not a 'hidden file', then re-raise exception
                raise
            # strip away leading path from the name
            fullname_with_subpath = content_path.replace(static_dir, '')
            if fullname_with_subpath.startswith('/'):
                fullname_with_subpath = fullname_with_subpath[1:]
            asset_key = StaticContent.compute_location(target_course_id, fullname_with_subpath)
            # Per-asset policy may override display name, lock state and MIME type.
            policy_ele = policy.get(asset_key.path, {})
            displayname = policy_ele.get('displayname', filename)
            locked = policy_ele.get('locked', False)
            mime_type = policy_ele.get('contentType')
            # Check extracted contentType in list of all valid mimetypes
            if not mime_type or mime_type not in mimetypes_list:
                mime_type = mimetypes.guess_type(filename)[0]  # Assign guessed mimetype
            content = StaticContent(
                asset_key, displayname, mime_type, data,
                import_path=fullname_with_subpath, locked=locked
            )
            # first let's save a thumbnail so we can get back a thumbnail location
            thumbnail_content, thumbnail_location = static_content_store.generate_thumbnail(content)
            if thumbnail_content is not None:
                content.thumbnail_location = thumbnail_location
            # then commit the content
            try:
                static_content_store.save(content)
            except Exception as err:
                # Log and continue: one bad asset should not abort the import.
                log.exception('Error importing {0}, error={1}'.format(
                    fullname_with_subpath, err
                ))
            # store the remapping information which will be needed
            # to subsitute in the module data
            remap_dict[fullname_with_subpath] = asset_key
    return remap_dict
def import_from_xml(
        store, data_dir, course_dirs=None,
        default_class='xmodule.raw_module.RawDescriptor',
        load_error_modules=True, static_content_store=None,
        target_course_id=None, verbose=False, draft_store=None,
        do_import_static=True, create_new_course_if_not_present=False):
    """
    Import the specified xml data_dir into the "store" modulestore,
    using org and course as the location org and course.
    course_dirs: If specified, the list of course_dirs to load. Otherwise, load
    all course dirs
    target_course_id is the CourseKey that all modules should be remapped to
    after import off disk. We do this remapping as a post-processing step
    because there's logic in the importing which expects a 'url_name' as an
    identifier to where things are on disk
    e.g. ../policies/<url_name>/policy.json as well as metadata keys in
    the policy.json. so we need to keep the original url_name during import
    :param do_import_static:
        if False, then static files are not imported into the static content
        store. This can be employed for courses which have substantial
        unchanging static content, which is to inefficient to import every
        time the course is loaded. Static content for some courses may also be
        served directly by nginx, instead of going through django.
    : create_new_course_if_not_present:
        If True, then a new course is created if it doesn't already exist.
        The check for existing courses is case-insensitive.

    Returns ``(xml_module_store, course_items)`` where course_items is the
    list of imported course modules.
    """
    # Parse everything off disk first; nothing is written to `store` yet.
    xml_module_store = XMLModuleStore(
        data_dir,
        default_class=default_class,
        course_dirs=course_dirs,
        load_error_modules=load_error_modules,
        xblock_mixins=store.xblock_mixins,
        xblock_select=store.xblock_select,
    )
    # If we're going to remap the course_id, then we can only do that with
    # a single course
    if target_course_id:
        assert(len(xml_module_store.modules) == 1)
    # NOTE: the XmlModuleStore does not implement get_items()
    # which would be a preferable means to enumerate the entire collection
    # of course modules. It will be left as a TBD to implement that
    # method on XmlModuleStore.
    course_items = []
    for course_key in xml_module_store.modules.keys():
        if target_course_id is not None:
            dest_course_id = target_course_id
        else:
            dest_course_id = course_key
        # Creates a new course if it doesn't already exist
        if create_new_course_if_not_present and not store.has_course(dest_course_id, ignore_case=True):
            try:
                store.create_course(dest_course_id.org, dest_course_id.offering)
            except InvalidLocationError:
                # course w/ same org and course exists
                log.debug(
                    "Skipping import of course with id, {0},"
                    "since it collides with an existing one".format(dest_course_id)
                )
                continue
        try:
            # turn off all write signalling while importing as this
            # is a high volume operation on stores that need it
            if hasattr(store, 'ignore_write_events_on_courses'):
                store.ignore_write_events_on_courses.add(dest_course_id)
            course_data_path = None
            if verbose:
                log.debug("Scanning {0} for course module...".format(course_key))
            # Quick scan to get course module as we need some info from there.
            # Also we need to make sure that the course module is committed
            # first into the store
            for module in xml_module_store.modules[course_key].itervalues():
                if module.scope_ids.block_type == 'course':
                    course_data_path = path(data_dir) / module.data_dir
                    log.debug(u'======> IMPORTING course {course_key}'.format(
                        course_key=course_key,
                    ))
                    if not do_import_static:
                        # for old-style xblock where this was actually linked to kvs
                        module.static_asset_path = module.data_dir
                        module.save()
                        log.debug('course static_asset_path={path}'.format(
                            path=module.static_asset_path
                        ))
                    log.debug('course data_dir={0}'.format(module.data_dir))
                    course = import_module(
                        module, store,
                        course_key,
                        dest_course_id,
                        do_import_static=do_import_static
                    )
                    # Rewrite c4x:// textbook chapter urls into /static/ paths.
                    for entry in course.pdf_textbooks:
                        for chapter in entry.get('chapters', []):
                            if StaticContent.is_c4x_path(chapter.get('url', '')):
                                asset_key = StaticContent.get_location_from_path(chapter['url'])
                                chapter['url'] = StaticContent.get_static_path_from_location(asset_key)
                    # Original wiki_slugs had value location.course. To make them unique this was changed to 'org.course.name'.
                    # If we are importing into a course with a different course_id and wiki_slug is equal to either of these default
                    # values then remap it so that the wiki does not point to the old wiki.
                    if course_key != course.id:
                        original_unique_wiki_slug = u'{0}.{1}.{2}'.format(
                            course_key.org,
                            course_key.course,
                            course_key.run
                        )
                        if course.wiki_slug == original_unique_wiki_slug or course.wiki_slug == course_key.course:
                            course.wiki_slug = u'{0}.{1}.{2}'.format(
                                course.id.org,
                                course.id.course,
                                course.id.run,
                            )
                    # cdodge: more hacks (what else). Seems like we have a
                    # problem when importing a course (like 6.002) which
                    # does not have any tabs defined in the policy file.
                    # The import goes fine and then displays fine in LMS,
                    # but if someone tries to add a new tab in the CMS, then
                    # the LMS barfs because it expects that -- if there are
                    # *any* tabs -- then there at least needs to be
                    # some predefined ones
                    if course.tabs is None or len(course.tabs) == 0:
                        CourseTabList.initialize_default(course)
                    store.update_item(course)
                    course_items.append(course)
            # then import all the static content
            if static_content_store is not None and do_import_static:
                # first pass to find everything in /static/
                import_static_content(
                    course_data_path, static_content_store,
                    dest_course_id, subpath='static', verbose=verbose
                )
            elif verbose and not do_import_static:
                log.debug(
                    "Skipping import of static content, "
                    "since do_import_static={0}".format(do_import_static)
                )
            # no matter what do_import_static is, import "static_import" directory
            # This is needed because the "about" pages (eg "overview") are
            # loaded via load_extra_content, and do not inherit the lms
            # metadata from the course module, and thus do not get
            # "static_content_store" properly defined. Static content
            # referenced in those extra pages thus need to come through the
            # c4x:// contentstore, unfortunately. Tell users to copy that
            # content into the "static_import" subdir.
            simport = 'static_import'
            if os.path.exists(course_data_path / simport):
                import_static_content(
                    course_data_path, static_content_store,
                    dest_course_id, subpath=simport, verbose=verbose
                )
            # finally loop through all the modules
            for module in xml_module_store.modules[course_key].itervalues():
                if module.scope_ids.block_type == 'course':
                    # we've already saved the course module up at the top
                    # of the loop so just skip over it in the inner loop
                    continue
                if verbose:
                    log.debug('importing module location {loc}'.format(
                        loc=module.location
                    ))
                import_module(
                    module, store,
                    course_key,
                    dest_course_id,
                    do_import_static=do_import_static,
                    system=course.runtime
                )
            # now import any 'draft' items
            if draft_store is not None:
                import_course_draft(
                    xml_module_store,
                    store,
                    draft_store,
                    course_data_path,
                    static_content_store,
                    course_key,
                    dest_course_id,
                    course.runtime
                )
        finally:
            # turn back on all write signalling on stores that need it
            if (hasattr(store, 'ignore_write_events_on_courses') and
                    dest_course_id in store.ignore_write_events_on_courses):
                store.ignore_write_events_on_courses.remove(dest_course_id)
                store.refresh_cached_metadata_inheritance_tree(dest_course_id)
    return xml_module_store, course_items
def import_module(
        module, store,
        source_course_id, dest_course_id,
        do_import_static=True, system=None):
    """
    Copy a single module into `store`, remapping its usage key and any
    reference-typed fields from `source_course_id` to `dest_course_id`.

    Returns the newly created module.
    """
    logging.debug(u'processing import of module {}...'.format(module.location.to_deprecated_string()))
    if do_import_static and 'data' in module.fields and isinstance(module.fields['data'], xblock.fields.String):
        # we want to convert all 'non-portable' links in the module_data
        # (if it is a string) to portable strings (e.g. /static/)
        module.data = rewrite_nonportable_content_links(
            source_course_id,
            dest_course_id,
            module.data
        )
    # Move the module to a new course
    new_usage_key = module.scope_ids.usage_id.map_into_course(dest_course_id)
    if new_usage_key.category == 'course':
        # The course module's name must match the destination run.
        new_usage_key = new_usage_key.replace(name=dest_course_id.run)
    new_module = store.create_xmodule(new_usage_key, system=system)
    def _convert_reference_fields_to_new_namespace(reference):
        """
        Convert a reference to the new namespace, but only
        if the original namespace matched the original course.
        Otherwise, returns the input value.
        """
        assert isinstance(reference, UsageKey)
        if source_course_id == reference.course_key:
            return reference.map_into_course(dest_course_id)
        else:
            return reference
    # Copy every explicitly-set field, remapping reference-typed values.
    for field_name, field in module.fields.iteritems():
        if field.is_set_on(module):
            if isinstance(field, Reference):
                new_ref = _convert_reference_fields_to_new_namespace(getattr(module, field_name))
                setattr(new_module, field_name, new_ref)
            elif isinstance(field, ReferenceList):
                references = getattr(module, field_name)
                new_references = [_convert_reference_fields_to_new_namespace(reference) for reference in references]
                setattr(new_module, field_name, new_references)
            elif isinstance(field, ReferenceValueDict):
                reference_dict = getattr(module, field_name)
                new_reference_dict = {
                    key: _convert_reference_fields_to_new_namespace(reference)
                    for key, reference
                    in reference_dict.items()
                }
                setattr(new_module, field_name, new_reference_dict)
            elif field_name == 'xml_attributes':
                value = getattr(module, field_name)
                # remove any export/import only xml_attributes
                # which are used to wire together draft imports
                if 'parent_sequential_url' in value:
                    del value['parent_sequential_url']
                if 'index_in_children_list' in value:
                    del value['index_in_children_list']
                setattr(new_module, field_name, value)
            else:
                setattr(new_module, field_name, getattr(module, field_name))
    store.update_item(new_module, '**replace_user**', allow_not_found=True)
    return new_module
def import_course_draft(
        xml_module_store, store, draft_store, course_data_path,
        static_content_store, source_course_id,
        target_course_id, mongo_runtime):
    '''
    This will import all the content inside of the 'drafts' folder, if it exists
    NOTE: This is not a full course import, basically in our current
    application only verticals (and downwards) can be in draft.
    Therefore, we need to use slightly different call points into
    the import process_xml as we can't simply call XMLModuleStore() constructor
    (like we do for importing public content)
    '''
    draft_dir = course_data_path + "/drafts"
    if not os.path.exists(draft_dir):
        return
    # create a new 'System' object which will manage the importing
    errorlog = make_error_tracker()
    # The course_dir as passed to ImportSystem is expected to just be relative, not
    # the complete path including data_dir. ImportSystem will concatenate the two together.
    data_dir = xml_module_store.data_dir
    # Whether or not data_dir ends with a "/" differs in production vs. test.
    if not data_dir.endswith("/"):
        data_dir += "/"
    draft_course_dir = draft_dir.replace(data_dir, '', 1)
    system = ImportSystem(
        xmlstore=xml_module_store,
        course_id=target_course_id,
        course_dir=draft_course_dir,
        error_tracker=errorlog.tracker,
        parent_tracker=ParentTracker(),
        load_error_modules=False,
        mixins=xml_module_store.xblock_mixins,
        field_data=KvsFieldData(kvs=DictKeyValueStore()),
    )
    # now walk the /vertical directory where each file in there
    # will be a draft copy of the Vertical
    # First it is necessary to order the draft items by their desired index in the child list
    # (order os.walk returns them in is not guaranteed).
    drafts = dict()
    for dirname, _dirnames, filenames in os.walk(draft_dir + "/vertical"):
        for filename in filenames:
            module_path = os.path.join(dirname, filename)
            with open(module_path, 'r') as f:
                try:
                    # note, on local dev it seems like OSX will put
                    # some extra files in the directory with "quarantine"
                    # information. These files are binary files and will
                    # throw exceptions when we try to parse the file
                    # as an XML string. Let's make sure we're
                    # dealing with a string before ingesting
                    data = f.read()
                    try:
                        # NOTE(review): `except X, err` is Python 2-only syntax;
                        # this module cannot be parsed by Python 3.
                        xml = data.decode('utf-8')
                    except UnicodeDecodeError, err:
                        # seems like on OSX localdev, the OS is making
                        # quarantine files in the unzip directory
                        # when importing courses so if we blindly try to
                        # enumerate through the directory, we'll try
                        # to process a bunch of binary quarantine files
                        # (which are prefixed with a '._' character which
                        # will dump a bunch of exceptions to the output,
                        # although they are harmless.
                        #
                        # Reading online docs there doesn't seem to be
                        # a good means to detect a 'hidden' file that works
                        # well across all OS environments. So for now, I'm using
                        # OSX's utilization of a leading '.' in the filename
                        # to indicate a system hidden file.
                        #
                        # Better yet would be a way to figure out if this is
                        # a binary file, but I haven't found a good way
                        # to do this yet.
                        if filename.startswith('._'):
                            continue
                        # Not a 'hidden file', then re-raise exception
                        raise err
                    descriptor = system.process_xml(xml)
                    # HACK: since we are doing partial imports of drafts
                    # the vertical doesn't have the 'url-name' set in the
                    # attributes (they are normally in the parent object,
                    # aka sequential), so we have to replace the location.name
                    # with the XML filename that is part of the pack
                    fn, fileExtension = os.path.splitext(filename)
                    descriptor.location = descriptor.location.replace(name=fn)
                    index = int(descriptor.xml_attributes['index_in_children_list'])
                    if index in drafts:
                        drafts[index].append(descriptor)
                    else:
                        drafts[index] = [descriptor]
                except Exception:
                    logging.exception('Error while parsing course xml.')
    # For each index_in_children_list key, there is a list of vertical descriptors.
    for key in sorted(drafts.iterkeys()):
        for descriptor in drafts[key]:
            course_key = descriptor.location.course_key
            try:
                def _import_module(module):
                    # Update the module's location to "draft" revision
                    # We need to call this method (instead of updating the location directly)
                    # to ensure that pure XBlock field data is updated correctly.
                    _update_module_location(module, module.location.replace(revision='draft'))
                    # make sure our parent has us in its list of children
                    # this is to make sure private only verticals show up
                    # in the list of children since they would have been
                    # filtered out from the non-draft store export
                    if module.location.category == 'vertical':
                        non_draft_location = module.location.replace(revision=None)
                        sequential_url = module.xml_attributes['parent_sequential_url']
                        index = int(module.xml_attributes['index_in_children_list'])
                        seq_location = course_key.make_usage_key_from_deprecated_string(sequential_url)
                        # IMPORTANT: Be sure to update the sequential
                        # in the NEW namespace
                        seq_location = seq_location.map_into_course(target_course_id)
                        sequential = store.get_item(seq_location, depth=0)
                        if non_draft_location not in sequential.children:
                            sequential.children.insert(index, non_draft_location)
                            store.update_item(sequential, '**replace_user**')
                    import_module(
                        module, draft_store,
                        source_course_id,
                        target_course_id, system=mongo_runtime
                    )
                    # Recurse so the whole draft subtree is imported.
                    for child in module.get_children():
                        _import_module(child)
                _import_module(descriptor)
            except Exception:
                # NOTE(review): message has a typo -- presumably meant
                # "Error while importing draft descriptor %s".
                logging.exception('There while importing draft descriptor %s', descriptor)
def allowed_metadata_by_category(category):
    """Return the metadata field names editable for modules of *category*.

    Unknown categories get the wildcard ``['*']`` (everything allowed).
    """
    # should this be in the descriptors?!?
    editable_fields = {
        'vertical': [],
        'chapter': ['start'],
        'sequential': ['due', 'format', 'start', 'graded'],
    }
    if category in editable_fields:
        return editable_fields[category]
    return ['*']
def check_module_metadata_editability(module):
    '''
    Assert that there is no metadata within a particular module that
    we can't support editing. However we always allow 'display_name'
    and 'xml_attributes'
    '''
    editable = allowed_metadata_by_category(module.location.category)
    if '*' in editable:
        # everything is allowed
        return 0
    # These two keys are always permitted regardless of category.
    permitted = set(editable) | {'xml_attributes', 'display_name'}
    illegal_keys = set(own_metadata(module).keys()) - permitted
    if not illegal_keys:
        return 0
    print(
        ": found non-editable metadata on {url}. "
        "These metadata keys are not supported = {keys}".format(
            url=module.location.to_deprecated_string(), keys=illegal_keys
        )
    )
    # One error per module, no matter how many bad keys were found.
    return 1
def validate_no_non_editable_metadata(module_store, course_id, category):
    """Sum metadata-editability errors over every *category* module of the course."""
    course_modules = module_store.modules[course_id]
    return sum(
        check_module_metadata_editability(course_modules[module_loc])
        for module_loc in course_modules
        if course_modules[module_loc].location.category == category
    )
def validate_category_hierarchy(
        module_store, course_id, parent_category, expected_child_category):
    """Count (and report) children of *parent_category* modules whose
    category differs from *expected_child_category*."""
    mismatch_count = 0
    # collect every module of the requested parent category
    matching_parents = [
        module for module in module_store.modules[course_id].itervalues()
        if module.location.category == parent_category
    ]
    for parent_module in matching_parents:
        for child_loc in parent_module.children:
            if child_loc.category == expected_child_category:
                continue
            mismatch_count += 1
            print(
                "ERROR: child {child} of parent {parent} was expected to be "
                "category of {expected} but was {actual}".format(
                    child=child_loc, parent=parent_module.location,
                    expected=expected_child_category,
                    actual=child_loc.category
                )
            )
    return mismatch_count
def validate_data_source_path_existence(path, is_err=True, extra_msg=None):
    """Return 1 (and print a message) when *path* does not exist, else 0.

    ``is_err`` selects the ERROR vs WARNING prefix; ``extra_msg`` is an
    optional trailing hint appended to the message.
    """
    if os.path.exists(path):
        return 0
    severity = 'ERROR' if is_err else 'WARNING'
    print(
        "{type}: Expected folder at {path}. {extra}".format(
            type=severity,
            path=path,
            extra=extra_msg or "",
        )
    )
    return 1
def validate_data_source_paths(data_dir, course_dir):
    """Check the course folder for its expected static asset directories.

    Returns ``(err_cnt, warn_cnt)``: a missing ``static/`` counts as an
    error, a missing ``static/subs`` only as a warning.
    """
    # check that there is a '/static/' directory
    course_path = data_dir / course_dir
    err_cnt = validate_data_source_path_existence(course_path / 'static')
    warn_cnt = validate_data_source_path_existence(
        course_path / 'static/subs', is_err=False,
        extra_msg='Video captions (if they are used) will not work unless they are static/subs.'
    )
    return err_cnt, warn_cnt
def validate_course_policy(module_store, course_id):
    """
    Validate that the course explicitly sets values for any fields
    whose defaults may have changed between the export and the import.
    Does not add to error count as these are just warnings.
    """
    # is there a reliable way to get the module location just given the course_id?
    warn_cnt = 0
    for module in module_store.modules[course_id].itervalues():
        if module.location.category != 'course':
            continue
        if not module._field_data.has(module, 'rerandomize'):
            warn_cnt += 1
            print(
                'WARN: course policy does not specify value for '
                '"rerandomize" whose default is now "never". '
                'The behavior of your course may change.'
            )
        if not module._field_data.has(module, 'showanswer'):
            warn_cnt += 1
            print(
                'WARN: course policy does not specify value for '
                '"showanswer" whose default is now "finished". '
                'The behavior of your course may change.'
            )
    return warn_cnt
def perform_xlint(
        data_dir, course_dirs,
        default_class='xmodule.raw_module.RawDescriptor',
        load_error_modules=True):
    """Lint the XML courses in *course_dirs*, printing a validation summary.

    Loads every course with an XMLModuleStore, then checks the on-disk data
    layout, the category hierarchy (course > chapter > sequential > vertical),
    course policy defaults, non-editable metadata, and the presence of a
    marketing video.  Returns the total error count (warnings are only
    reported, not returned).
    """
    err_cnt = 0
    warn_cnt = 0
    module_store = XMLModuleStore(
        data_dir,
        default_class=default_class,
        course_dirs=course_dirs,
        load_error_modules=load_error_modules
    )
    # check all data source path information
    for course_dir in course_dirs:
        _err_cnt, _warn_cnt = validate_data_source_paths(path(data_dir), course_dir)
        err_cnt += _err_cnt
        warn_cnt += _warn_cnt
    # first count all errors and warnings as part of the XMLModuleStore import
    for err_log in module_store._course_errors.itervalues():
        for err_log_entry in err_log.errors:
            # each entry is a (message, ...) tuple; severity is encoded in the prefix
            msg = err_log_entry[0]
            if msg.startswith('ERROR:'):
                err_cnt += 1
            else:
                warn_cnt += 1
    # then count outright all courses that failed to load at all
    for err_log in module_store.errored_courses.itervalues():
        for err_log_entry in err_log.errors:
            msg = err_log_entry[0]
            print(msg)
            if msg.startswith('ERROR:'):
                err_cnt += 1
            else:
                warn_cnt += 1
    for course_id in module_store.modules.keys():
        # constrain that courses only have 'chapter' children
        err_cnt += validate_category_hierarchy(
            module_store, course_id, "course", "chapter"
        )
        # constrain that chapters only have 'sequentials'
        err_cnt += validate_category_hierarchy(
            module_store, course_id, "chapter", "sequential"
        )
        # constrain that sequentials only have 'verticals'
        err_cnt += validate_category_hierarchy(
            module_store, course_id, "sequential", "vertical"
        )
        # validate the course policy overrides any defaults
        # which have changed over time
        warn_cnt += validate_course_policy(module_store, course_id)
        # don't allow metadata on verticals, since we can't edit them in studio
        err_cnt += validate_no_non_editable_metadata(
            module_store, course_id, "vertical"
        )
        # don't allow metadata on chapters, since we can't edit them in studio
        err_cnt += validate_no_non_editable_metadata(
            module_store, course_id, "chapter"
        )
        # don't allow metadata on sequences that we can't edit
        err_cnt += validate_no_non_editable_metadata(
            module_store, course_id, "sequential"
        )
        # check for a presence of a course marketing video
        if not module_store.has_item(course_id.make_usage_key('about', 'video')):
            print(
                "WARN: Missing course marketing video. It is recommended "
                "that every course have a marketing video."
            )
            warn_cnt += 1
    print("\n")
    print("------------------------------------------")
    print("VALIDATION SUMMARY: {err} Errors {warn} Warnings".format(
        err=err_cnt, warn=warn_cnt)
    )
    if err_cnt > 0:
        print(
            "This course is not suitable for importing. Please fix courseware "
            "according to specifications before importing."
        )
    elif warn_cnt > 0:
        print(
            "This course can be imported, but some errors may occur "
            "during the run of the course. It is recommend that you fix "
            "your courseware before importing"
        )
    else:
        print("This course can be imported successfully.")
    return err_cnt
def _update_module_location(module, new_location):
    """
    Update a module's location.
    If the module is a pure XBlock (not an XModule), then its field data
    keys will need to be updated to include the new location.
    Args:
        module (XModuleMixin): The module to update.
        new_location (Location): The new location of the module.
    Returns:
        None
    """
    # Retrieve the content and settings fields that have been explicitly set
    # to ensure that they are properly re-keyed in the XBlock field data.
    if isinstance(module, XModuleDescriptor):
        rekey_fields = []
    else:
        # Wrap in list() so this also works on Python 3, where dict.keys()
        # returns a view that cannot be concatenated with '+'.
        rekey_fields = (
            list(module.get_explicitly_set_fields_by_scope(Scope.content).keys()) +
            list(module.get_explicitly_set_fields_by_scope(Scope.settings).keys())
        )
    module.location = new_location
    # Pure XBlocks store the field data in a key-value store
    # in which one component of the key is the XBlock's location (equivalent to "scope_ids").
    # Since we've changed the XBlock's location, we need to re-save
    # all the XBlock's fields so they will be stored using the new location in the key.
    # However, since XBlocks only save "dirty" fields, we need to first
    # explicitly set each field to its current value before triggering the save.
    if rekey_fields:
        for rekey_field_name in rekey_fields:
            setattr(module, rekey_field_name, getattr(module, rekey_field_name))
        module.save()
| agpl-3.0 |
linucks/ample | ample/util/benchmark_util.py | 1 | 21169 | """
Created on 24 Oct 2014
@author: jmht
"""
import copy
import glob
import logging
import os
import pandas as pd
import shutil
import sys
from ample.util import ample_util, csymmatch, mtz_util, pdb_edit, pdb_model, reforigin, residue_map, rio, shelxe, tm_util
logger = logging.getLogger(__name__)
_oldroot = None
_newroot = None
SHELXE_STEM = 'shelxe'
_CSV_KEYLIST = [
'ample_version',
# Native info
'native_pdb_code',
'native_pdb_title',
'native_pdb_resolution',
'native_pdb_solvent_content',
'native_pdb_space_group',
'native_pdb_num_atoms',
'native_pdb_num_residues',
'native_pdb_num_chains',
# The modelled sequence
'fasta_length',
# Get the ensemble data and add to the MRBUMP data
'ensemble_name',
'ensemble_percent_model',
# cluster info
'cluster_method',
'num_clusters',
'cluster_num',
'cluster_centroid',
'cluster_num_models',
# truncation info
'truncation_level',
'percent_truncation',
'truncation_method',
'truncation_pruning',
'truncation_variance',
'num_residues',
'pruned_residues',
# subclustering info
'subcluster_num_models',
'subcluster_radius_threshold',
'subcluster_centroid_model',
'subcluster_centroid_model_RMSD',
'subcluster_centroid_model_TM',
# ensemble info
# 'name',
'side_chain_treatment',
'ensemble_num_atoms',
# MR result info
# 'name',
'MR_program',
'Solution_Type',
'PHASER_LLG',
'PHASER_TFZ',
'PHASER_RFZ',
'PHASER_time',
'PHASER_killed',
'PHASER_version',
'PHASER_errors',
'MOLREP_score',
'MOLREP_time',
'MOLREP_version',
'MR_MPE',
'MR_wMPE',
'REFMAC_Rfact',
'REFMAC_Rfree',
# 'REFMAC_MPE',
# 'REFMAC_wMPE',
'REFMAC_version',
'BUCC_final_Rfact',
'BUCC_final_Rfree',
'BUCC_version',
'ARP_final_Rfact',
'ARP_final_Rfree',
'ARP_version',
'SHELXE_CC',
'SHELXE_ACL',
'SHELXE_MCL',
'SHELXE_NC',
'SHELXE_wPE',
'SHELXE_wMPE',
'SHELXE_os',
'SHELXE_time',
'SHELXE_version',
'SXRBUCC_version',
'SXRBUCC_final_Rfact',
'SXRBUCC_final_Rfree',
'SXRBUCC_MPE',
'SXRBUCC_wMPE',
'SXRARP_version',
'SXRARP_final_Rfact',
'SXRARP_final_Rfree',
'SXRARP_MPE',
'SXRARP_wMPE',
'num_placed_chains',
'num_placed_atoms',
'reforigin_RMSD',
'AA_num_contacts',
'RIO_num_contacts',
'RIO_in_register',
'RIO_oo_register',
'RIO_backwards',
'RIO',
'RIO_no_cat',
'RIO_norm',
]
def analyse(amoptd, newroot=None):
    """Run the benchmark analysis over all MRBUMP results in *amoptd*.

    Merges each MRBUMP result with its ensemble data, analyses the solution
    against the native structure (when available), collects everything into a
    pandas DataFrame, and writes it to results.csv in the benchmark directory.
    *newroot*, when given, remaps all stored paths from the original work_dir
    (used when analysing on a different machine - see fixpath()).
    """
    if newroot:
        assert os.path.isdir(newroot)
        global _oldroot, _newroot
        _newroot = newroot
        _oldroot = amoptd['work_dir']
    if not os.path.isdir(fixpath(amoptd['benchmark_dir'])):
        os.mkdir(fixpath(amoptd['benchmark_dir']))
    os.chdir(fixpath(amoptd['benchmark_dir']))
    # AnalysePdb may have already been called from the main script
    if amoptd['native_pdb'] and 'native_pdb_std' not in amoptd:
        analysePdb(amoptd)
    if amoptd['native_pdb_std']:
        # Generate an SHELXE HKL and ENT file so that we can calculate phase errors
        mtz_util.to_hkl(amoptd['mtz'], hkl_file=os.path.join(amoptd['benchmark_dir'], SHELXE_STEM + ".hkl"))
        shutil.copyfile(amoptd['native_pdb_std'], os.path.join(amoptd['benchmark_dir'], SHELXE_STEM + ".ent"))
    # Model comparison only makes sense when we modelled from scratch.
    if amoptd['native_pdb'] and not (
        amoptd['homologs'] or amoptd['ideal_helices'] or amoptd['import_ensembles'] or amoptd['single_model_mode']
    ):
        analyseModels(amoptd)
    # Get the ensembling data
    if 'ensembles_data' not in amoptd or not len(amoptd['ensembles_data']):
        logger.critical("Benchmark cannot find any ensemble data!")
        return
    # Get dict of ensemble name -> ensemble result
    ensemble_results = {e['name']: e for e in amoptd['ensembles_data']}
    # Get mrbump_results for cluster
    if 'mrbump_results' not in amoptd or not len(amoptd['mrbump_results']):
        logger.critical("Benchmark cannot find any mrbump results!")
        return
    data = []
    mrinfo = shelxe.MRinfo(amoptd['shelxe_exe'], amoptd['native_pdb_info'].pdb, amoptd['mtz'])
    for result in amoptd['mrbump_results']:
        # use mrbump dict as basis for result object
        d = copy.copy(result)
        # Add in the data from the ensemble
        d.update(ensemble_results[d['ensemble_name']])
        assert d['ensemble_name'] == d['name'], d
        # Hack for old results
        if 'truncation_num_residues' in d:
            d['num_residues'] = d['truncation_num_residues']
            del d['truncation_num_residues']
        # Hack for ideal helices where num_residues are missing
        if amoptd['ideal_helices'] and ('num_residues' not in d or d['num_residues'] is None):
            d['num_residues'] = int(d['ensemble_name'].lstrip('polyala'))
        # Get the ensemble data and add to the MRBUMP data
        d['ensemble_percent_model'] = int((float(d['num_residues']) / float(amoptd['fasta_length'])) * 100)
        if amoptd['native_pdb']:
            # Add in stuff we've cleaned from the pdb
            native_keys = [
                'native_pdb_code',
                'native_pdb_title',
                'native_pdb_resolution',
                'native_pdb_solvent_content',
                'native_pdb_space_group',
                'native_pdb_num_chains',
                'native_pdb_num_atoms',
                'native_pdb_num_residues',
            ]
            d.update({key: amoptd[key] for key in native_keys})
        # Analyse the solution (internally a no-op for most metrics without a native pdb)
        analyseSolution(amoptd, d, mrinfo)
        data.append(d)
    # Put everything in a pandas DataFrame
    dframe = pd.DataFrame(data)
    # General stuff
    dframe['ample_version'] = amoptd['ample_version']
    dframe['fasta_length'] = amoptd['fasta_length']
    # Analyse subcluster centroid models
    if 'subcluster_centroid_model' in dframe.columns and amoptd['native_pdb']:
        centroid_index = dframe.index
        centroid_models = [fixpath(f) for f in dframe.subcluster_centroid_model]
        native_pdb_std = fixpath(amoptd['native_pdb_std'])
        fasta = fixpath(amoptd['fasta'])
        # Calculation of TMscores for subcluster centroid models
        if amoptd['have_tmscore']:
            tm = tm_util.TMscore(amoptd['tmscore_exe'], wdir=fixpath(amoptd['benchmark_dir']), **amoptd)
            tm_results = tm.compare_structures(centroid_models, [native_pdb_std], [fasta])
            centroid_tmscores = [r['tmscore'] for r in tm_results]
            centroid_rmsds = [r['rmsd'] for r in tm_results]
        else:
            raise RuntimeError("No program to calculate tmscores!")
        dframe['subcluster_centroid_model_TM'] = pd.Series(centroid_tmscores, index=centroid_index)
        dframe['subcluster_centroid_model_RMSD'] = pd.Series(centroid_rmsds, index=centroid_index)
    # Save the data
    file_name = os.path.join(fixpath(amoptd['benchmark_dir']), 'results.csv')
    dframe.to_csv(file_name, columns=_CSV_KEYLIST, index=False, na_rep="N/A")
    amoptd['benchmark_results'] = dframe.to_dict('records')
    return
def analyseModels(amoptd):
    """Compare the ab initio models to the native structure.

    Populates *amoptd* with 'ref_model_pdb_info' (info on one reference model),
    'res_seq_map' (model->native residue mapping, or None on failure) and
    'tmComp' (TMscore comparisons of all models against the native).
    Raises RuntimeError when no TMscore program is available.
    """
    # Get hold of a full model so we can do the mapping of residues
    refModelPdb = glob.glob(os.path.join(amoptd['models_dir'], "*.pdb"))[0]
    nativePdbInfo = amoptd['native_pdb_info']
    refModelPdbInfo = pdb_edit.get_info(refModelPdb)
    amoptd['ref_model_pdb_info'] = refModelPdbInfo
    try:
        resSeqMap = residue_map.residueSequenceMap()
        resSeqMap.fromInfo(
            refInfo=refModelPdbInfo,
            refChainID=refModelPdbInfo.models[0].chains[0],  # Only 1 chain in model
            targetInfo=nativePdbInfo,
            targetChainID=nativePdbInfo.models[0].chains[0],
        )
        amoptd['res_seq_map'] = resSeqMap
    except Exception as e:
        logger.exception("Error calculating resSeqMap: %s" % e)
        amoptd['res_seq_map'] = None  # Won't be able to calculate RIO scores
    if amoptd['have_tmscore']:
        try:
            tm = tm_util.TMscore(amoptd['tmscore_exe'], wdir=fixpath(amoptd['benchmark_dir']))
            # Calculation of TMscores for all models
            logger.info("Analysing Rosetta models with TMscore")
            model_list = sorted(glob.glob(os.path.join(amoptd['models_dir'], "*pdb")))
            structure_list = [amoptd['native_pdb_std']]
            amoptd['tmComp'] = tm.compare_structures(model_list, structure_list, fastas=[amoptd['fasta']])
        except Exception as e:
            logger.exception("Unable to run TMscores: %s", e)
    else:
        raise RuntimeError("No program to calculate TMSCORES")
def analysePdb(amoptd):
    """Collect data on the native pdb structure.

    Standardises the native PDB (first model only, standard AA names, no
    solvent) and stores the cleaned file paths plus crystallographic metadata
    (resolution, space group, atom/residue/chain counts, origin info) in
    *amoptd*.
    """
    nativePdb = fixpath(amoptd['native_pdb'])
    nativePdbInfo = pdb_edit.get_info(nativePdb)
    # number atoms/residues
    natoms, nresidues = pdb_edit.num_atoms_and_residues(nativePdb)
    # Get information on the origins for this spaceGroup
    try:
        originInfo = pdb_model.OriginInfo(spaceGroupLabel=nativePdbInfo.crystalInfo.spaceGroup)
    except Exception:
        # best-effort: an unrecognised space group just disables origin handling
        originInfo = None
    # Do this here as a bug in pdbcur can knacker the CRYST1 data
    amoptd['native_pdb_code'] = nativePdbInfo.pdbCode
    amoptd['native_pdb_title'] = nativePdbInfo.title
    amoptd['native_pdb_resolution'] = nativePdbInfo.resolution
    amoptd['native_pdb_solvent_content'] = nativePdbInfo.solventContent
    amoptd['native_pdb_matthews_coefficient'] = nativePdbInfo.matthewsCoefficient
    if not originInfo:
        space_group = "P1"
    else:
        space_group = originInfo.spaceGroup()
    amoptd['native_pdb_space_group'] = space_group
    amoptd['native_pdb_num_atoms'] = natoms
    amoptd['native_pdb_num_residues'] = nresidues
    # First check if the native has > 1 model and extract the first if so
    if len(nativePdbInfo.models) > 1:
        logger.info("nativePdb has > 1 model - using first")
        nativePdb1 = ample_util.filename_append(
            filename=nativePdb, astr="model1", directory=fixpath(amoptd['work_dir'])
        )
        pdb_edit.extract_model(nativePdb, nativePdb1, modelID=nativePdbInfo.models[0].serial)
        nativePdb = nativePdb1
    # Standardise the PDB to rename any non-standard AA, remove solvent etc
    nativePdbStd = ample_util.filename_append(filename=nativePdb, astr="std", directory=fixpath(amoptd['work_dir']))
    pdb_edit.standardise(nativePdb, nativePdbStd, del_hetatm=True)
    nativePdb = nativePdbStd
    # Get the new Info about the native
    nativePdbInfo = pdb_edit.get_info(nativePdb)
    # For comparsion of shelxe model we need a single chain from the native so we get this here
    if len(nativePdbInfo.models[0].chains) > 1:
        nativeChain1 = ample_util.filename_append(
            filename=nativePdbInfo.pdb, astr="chain1", directory=fixpath(amoptd['work_dir'])
        )
        pdb_edit.merge_chains(nativePdbInfo.pdb, nativeChain1)
    else:
        nativeChain1 = nativePdbInfo.pdb
    # Additional data
    amoptd['native_pdb_num_chains'] = len(nativePdbInfo.models[0].chains)
    amoptd['native_pdb_info'] = nativePdbInfo
    amoptd['native_pdb_std'] = nativePdbStd
    amoptd['native_pdb_1chain'] = nativeChain1
    amoptd['native_pdb_origin_info'] = originInfo
    return
def analyseSolution(amoptd, d, mrinfo):
    """Analyse one MR result dict *d* in place.

    Adds placement counts, phase errors (MPE/wMPE), reforigin RMSD and RIO
    scores, and writes csymmatch-wrapped PDBs of the MR, SHELXE, Buccaneer
    and ARP/wARP models into the benchmark directory.  Returns early when the
    MR program is unknown or no placed PDB exists; native-dependent metrics
    are only computed when amoptd['native_pdb'] is set.
    """
    logger.info("Benchmark: analysing result: {0}".format(d['ensemble_name']))
    mrPdb = None
    if d['MR_program'] == "PHASER":
        mrPdb = d['PHASER_pdbout']
        mrMTZ = d['PHASER_mtzout']  # NOTE(review): assigned but unused below - confirm still needed
    elif d['MR_program'] == "MOLREP":
        mrPdb = d['MOLREP_pdbout']
    elif d['MR_program'] == "unknown":
        return
    if mrPdb is None or not os.path.isfile(mrPdb):
        # logger.critical("Cannot find mrPdb {0} for solution {1}".format(mrPdb,d))
        return
    # debug - copy into work directory as reforigin struggles with long pathnames
    shutil.copy(mrPdb, os.path.join(fixpath(amoptd['benchmark_dir']), os.path.basename(mrPdb)))
    mrPdbInfo = pdb_edit.get_info(mrPdb)
    d['num_placed_chains'] = mrPdbInfo.numChains()
    d['num_placed_atoms'] = mrPdbInfo.numAtoms()
    d['num_placed_CA'] = mrPdbInfo.numCalpha()
    if amoptd['native_pdb']:
        if not d['SHELXE_os']:
            logger.critical("mrPdb {0} has no SHELXE_os origin shift. Calculating...".format(mrPdb))
            mrinfo.analyse(mrPdb)
            mrOrigin = mrinfo.originShift
            d['SHELXE_MPE'] = mrinfo.MPE
            d['SHELXE_wMPE'] = mrinfo.wMPE
        else:
            # SHELXE reports the shift with opposite sign to what we apply
            mrOrigin = [c * -1 for c in d['SHELXE_os']]
        # Move pdb onto new origin
        originPdb = ample_util.filename_append(mrPdb, astr='offset', directory=fixpath(amoptd['benchmark_dir']))
        pdb_edit.translate(mrPdb, originPdb, mrOrigin)
        # offset.pdb is the mrModel shifted onto the new origin use csymmatch to wrap onto native
        csymmatch.Csymmatch().wrapModelToNative(
            originPdb,
            amoptd['native_pdb'],
            csymmatchPdb=os.path.join(
                fixpath(amoptd['benchmark_dir']), "phaser_{0}_csymmatch.pdb".format(d['ensemble_name'])
            ),
        )
        # can now delete origin pdb
        os.unlink(originPdb)
        # Calculate phase error for the MR PDB
        try:
            mrinfo.analyse(mrPdb)
            d['MR_MPE'] = mrinfo.MPE
            d['MR_wMPE'] = mrinfo.wMPE
        except Exception as e:
            logger.critical("Error analysing mrPdb: {0}\n{1}".format(mrPdb, e))
        # We cannot calculate the Reforigin RMSDs or RIO scores for runs where we don't have a full initial model
        # to compare to the native to allow us to determine which parts of the ensemble correspond to which parts of
        # the native structure - or if we were unable to calculate a res_seq_map
        # NOTE(review): the final term 'amoptd['res_seq_map']' looks inverted -
        # the comment above implies RIO should be skipped when the map is
        # *missing* (i.e. 'not amoptd['res_seq_map']').  As written, a
        # successful res_seq_map disables the RIO branch - TODO confirm.
        if not (
            amoptd['homologs']
            or amoptd['ideal_helices']
            or amoptd['import_ensembles']
            or amoptd['single_model_mode']
            or amoptd['res_seq_map']
        ):
            # Get reforigin info
            rmsder = reforigin.ReforiginRmsd()
            try:
                rmsder.getRmsd(
                    nativePdbInfo=amoptd['native_pdb_info'],
                    placedPdbInfo=mrPdbInfo,
                    refModelPdbInfo=amoptd['ref_model_pdb_info'],
                    cAlphaOnly=True,
                    workdir=fixpath(amoptd['benchmark_dir']),
                )
                d['reforigin_RMSD'] = rmsder.rmsd
            except Exception as e:
                logger.critical("Error calculating RMSD: {0}".format(e))
                d['reforigin_RMSD'] = 999  # sentinel: RMSD calculation failed
            # Score the origin with all-atom and rio
            rioData = rio.Rio().scoreOrigin(
                mrOrigin,
                mrPdbInfo=mrPdbInfo,
                nativePdbInfo=amoptd['native_pdb_info'],
                resSeqMap=amoptd['res_seq_map'],
                workdir=fixpath(amoptd['benchmark_dir']),
            )
            # Set attributes
            d['AA_num_contacts'] = rioData.aaNumContacts
            d['RIO_num_contacts'] = rioData.rioNumContacts
            d['RIO_in_register'] = rioData.rioInRegister
            d['RIO_oo_register'] = rioData.rioOoRegister
            d['RIO_backwards'] = rioData.rioBackwards
            d['RIO'] = rioData.rioInRegister + rioData.rioOoRegister
            d['RIO_no_cat'] = rioData.rioNumContacts - (rioData.rioInRegister + rioData.rioOoRegister)
            d['RIO_norm'] = float(d['RIO']) / float(d['native_pdb_num_residues'])
        else:
            # RIO metrics are not applicable for this run type
            d['AA_num_contacts'] = None
            d['RIO_num_contacts'] = None
            d['RIO_in_register'] = None
            d['RIO_oo_register'] = None
            d['RIO_backwards'] = None
            d['RIO'] = None
            d['RIO_no_cat'] = None
            d['RIO_norm'] = None
        # # Now get the helix
        # helixSequence = contacts.Rio().helixFromContacts( contacts=rioData.contacts,
        # dsspLog=dsspLog )
        # if helixSequence is not None:
        # ampleResult.rioHelixSequence = helixSequence
        # ampleResult.rioLenHelix = len( helixSequence )
        # hfile = os.path.join( workdir, "{0}.helix".format( ampleResult.ensembleName ) )
        # with open( hfile, 'w' ) as f:
        # f.write( helixSequence+"\n" )
        #
        # This purely for checking and so we have pdbs to view
        #
        # Wrap shelxe trace onto native using Csymmatch
        if not d['SHELXE_pdbout'] is None and os.path.isfile(fixpath(d['SHELXE_pdbout'])):
            csymmatch.Csymmatch().wrapModelToNative(
                fixpath(d['SHELXE_pdbout']),
                amoptd['native_pdb'],
                origin=mrOrigin,
                workdir=fixpath(amoptd['benchmark_dir']),
            )
            if not ('SHELXE_wMPE' in d and d['SHELXE_wMPE']):
                try:
                    mrinfo.analyse(d['SHELXE_pdbout'])
                    d['SHELXE_MPE'] = mrinfo.MPE
                    d['SHELXE_wMPE'] = mrinfo.wMPE
                except Exception as e:
                    logger.critical("Error analysing SHELXE_pdbout: {0}\n{1}".format(d['SHELXE_pdbout'], e))
        # Wrap parse_buccaneer model onto native
        if d['SXRBUCC_pdbout'] and os.path.isfile(fixpath(d['SXRBUCC_pdbout'])):
            # Need to rename Pdb as is just called buccSX_output.pdb
            csymmatchPdb = os.path.join(
                fixpath(amoptd['benchmark_dir']), "buccaneer_{0}_csymmatch.pdb".format(d['ensemble_name'])
            )
            csymmatch.Csymmatch().wrapModelToNative(
                fixpath(d['SXRBUCC_pdbout']),
                amoptd['native_pdb'],
                origin=mrOrigin,
                csymmatchPdb=csymmatchPdb,
                workdir=fixpath(amoptd['benchmark_dir']),
            )
            # Calculate phase error
            try:
                mrinfo.analyse(d['SXRBUCC_pdbout'])
                d['SXRBUCC_MPE'] = mrinfo.MPE
                d['SXRBUCC_wMPE'] = mrinfo.wMPE
            except Exception as e:
                logger.critical("Error analysing SXRBUCC_pdbout: {0}\n{1}".format(d['SXRBUCC_pdbout'], e))
        # Wrap parse_buccaneer model onto native
        if d['SXRARP_pdbout'] and os.path.isfile(fixpath(d['SXRARP_pdbout'])):
            # Need to rename Pdb as is just called buccSX_output.pdb
            csymmatchPdb = os.path.join(
                fixpath(amoptd['benchmark_dir']), "arpwarp_{0}_csymmatch.pdb".format(d['ensemble_name'])
            )
            csymmatch.Csymmatch().wrapModelToNative(
                fixpath(d['SXRARP_pdbout']),
                amoptd['native_pdb'],
                origin=mrOrigin,
                csymmatchPdb=csymmatchPdb,
                workdir=fixpath(amoptd['benchmark_dir']),
            )
            # Calculate phase error
            try:
                mrinfo.analyse(d['SXRARP_pdbout'])
                d['SXRARP_MPE'] = mrinfo.MPE
                d['SXRARP_wMPE'] = mrinfo.wMPE
            except Exception as e:
                logger.critical("Error analysing SXRARP_pdbout: {0}\n{1}".format(d['SXRARP_pdbout'], e))
    return
def cluster_script(amoptd, python_path="ccp4-python"):
    """Create the script for benchmarking on a cluster"""
    work_dir = amoptd['work_dir']
    script_path = os.path.join(work_dir, "submit_benchmark.sh")
    # This module's own directory tells us where benchmark_util.py lives.
    this_dir = os.path.abspath(os.path.dirname(__file__))
    benchmark_script = os.path.join(this_dir, "benchmark_util.py")
    with open(script_path, "w") as job_script:
        job_script.write("#!/bin/sh\n")
        job_script.write("{0} {1} {2} {3}\n".format(python_path, "-u", benchmark_script, amoptd['results_path']))
    # Make executable
    # NOTE(review): 0o777 leaves the script world-writable - confirm intended.
    os.chmod(script_path, 0o777)
    return script_path
def fixpath(path):
    """Translate *path* from the original analysis root to the current one.

    A no-op unless both module-level roots have been set via analyse(newroot=...),
    which happens when results are analysed on a different machine.
    """
    if not (_oldroot and _newroot):
        return path
    return path.replace(_oldroot, _newroot)
# Script entry point: re-run the benchmark analysis from a pickled amopt
# dictionary.  Used when the benchmarking step is submitted to a cluster.
if __name__ == "__main__":
    # Set up logging - could append to an existing log?
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    # This runs the benchmarking starting from a pickled file containing an amopt dictionary.
    # - used when submitting the modelling jobs to a cluster
    if len(sys.argv) != 2 or not os.path.isfile(sys.argv[1]):
        # NOTE(review): logging.debug before any handler is attached may be
        # swallowed by the default WARNING-level root config - confirm.
        logging.debug("benchmark script requires the path to a pickled amopt dictionary!")
        sys.exit(1)
    # Get the amopt dictionary
    amoptd = ample_util.read_amoptd(sys.argv[1])
    # Log everything to benchmark.log in the job's work directory.
    fl = logging.FileHandler(os.path.join(amoptd['work_dir'], "benchmark.log"))
    fl.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fl.setFormatter(formatter)
    logger.addHandler(fl)
    analyse(amoptd)
    # Persist the updated dictionary (now containing 'benchmark_results').
    ample_util.save_amoptd(amoptd)
jijo-paulose/django-profile | userprofile/forms.py | 8 | 5911 | from django import forms
from django.core.exceptions import ObjectDoesNotExist
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.utils.translation import ugettext as _
from django.conf import settings
from django.contrib.auth.models import User, SiteProfileNotAvailable
from userprofile.models import EmailValidation, AVATAR_SIZES, DEFAULT_AVATAR_SIZE, MIN_AVATAR_SIZE
from django.core.files.uploadedfile import SimpleUploadedFile
import mimetypes, urllib
from django.contrib.auth.forms import UserCreationForm
# Resolve the project's profile model from settings.AUTH_PROFILE_MODULE
# ("app_label.ModelName").  Fail fast at import time when it is missing or
# cannot be loaded, mirroring django.contrib.auth behaviour.
if not settings.AUTH_PROFILE_MODULE:
    raise SiteProfileNotAvailable
try:
    app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
    Profile = models.get_model(app_label, model_name)
except (ImportError, ImproperlyConfigured):
    raise SiteProfileNotAvailable
class LocationForm(forms.ModelForm):
    """
    Profile location form.

    Edits only the geographic fields of the configurable Profile model.
    """
    class Meta:
        model = Profile
        fields = ('location', 'latitude', 'longitude', 'country')
class ProfileForm(forms.ModelForm):
    """
    Profile Form. Composed by all the Profile model fields.

    Excludes bookkeeping fields and the location fields handled by
    LocationForm.
    """
    class Meta:
        model = Profile
        exclude = ('creation_date', 'location', 'latitude', 'longitude', 'country',
                   'user', 'public', 'site')
class PublicFieldsForm(forms.ModelForm):
    """
    Public Fields of the Profile Form. Composed by all the Profile model fields.

    Unlike ProfileForm, the 'public' visibility flags themselves are also
    editable here; only bookkeeping fields are excluded.
    """
    class Meta:
        model = Profile
        exclude = ('creation_date', 'user', 'public')
class AvatarForm(forms.Form):
    """
    The avatar form requires only one image field: either an uploaded photo
    or a URL the image is fetched from.
    """
    photo = forms.ImageField(required=False, help_text=_("Select an image from disk"))
    url = forms.URLField(required=False, help_text=_("Select an image from a remote URL. Put the URL on the input below and we'll retrieve the image for you"))

    def clean_url(self):
        """Download the remote image and wrap it as an uploaded file.

        Returns '' when no URL was given.  Raises ValidationError when the
        download fails or the reported content type has no known extension.
        """
        url = self.cleaned_data.get('url')
        if not url:
            return ''
        try:
            filename, headers = urllib.urlretrieve(url)
        except (IOError, OSError, ValueError):
            # was a bare 'except:'; restrict to download/URL errors
            raise forms.ValidationError(forms.fields.URLField.default_error_messages['invalid_link'])
        content_type = headers.get('Content-Type')
        if not mimetypes.guess_all_extensions(content_type):
            # The original interpolated the *builtin* 'type' into the message;
            # report the actual content type instead.
            raise forms.ValidationError(_('The file type is invalid: %s' % content_type))
        # Read in binary mode and close the handle (previously leaked and
        # opened in text mode, which corrupts images on some platforms).
        with open(filename, 'rb') as fh:
            data = fh.read()
        return SimpleUploadedFile(filename, data, content_type=content_type)

    def clean(self):
        """Require at least one of 'photo' or 'url' to be supplied."""
        if not (self.cleaned_data.get('photo') or self.cleaned_data.get('url')):
            raise forms.ValidationError(_('You must enter one of the options'))
        return self.cleaned_data
class AvatarCoordinateField(forms.IntegerField):
    """IntegerField that also accepts float-formatted values (e.g. "12.7")
    as produced by client-side cropping widgets; such values are truncated."""

    def clean(self, value):
        try:
            value = int(float(value))
        except (TypeError, ValueError):
            # was a bare 'except: pass'; leave the raw value for
            # IntegerField.clean() to reject with its own error message
            pass
        return super(AvatarCoordinateField, self).clean(value)
class AvatarCropForm(forms.Form):
    """
    Crop dimensions form.

    Validates that the selected region is at least MIN_AVATAR_SIZE pixels wide.
    """
    top = AvatarCoordinateField()
    bottom = AvatarCoordinateField()
    left = AvatarCoordinateField()
    right = AvatarCoordinateField()

    def __init__(self, image=None, *args, **kwargs):
        # The image being cropped is kept for use by the view.
        self.image = image
        super(AvatarCropForm, self).__init__(*args, **kwargs)

    def clean(self):
        right = self.cleaned_data.get('right')
        left = self.cleaned_data.get('left')
        # Compare against None explicitly: 0 is a valid coordinate, and the
        # previous truthiness test skipped validation whenever left == 0.
        if right is not None and left is not None and \
                int(right) - int(left) < MIN_AVATAR_SIZE:
            raise forms.ValidationError(_("You must select a portion of the image with a minimum of %(min_avatar_size)dx%(min_avatar_size)d pixels.") % { 'min_avatar_size': MIN_AVATAR_SIZE })
        return self.cleaned_data
class RegistrationForm(UserCreationForm):
    """User registration form; e-mail is mandatory only when the project
    requires e-mail confirmation (settings.REQUIRE_EMAIL_CONFIRMATION)."""
    email = forms.EmailField(required=getattr(settings, "REQUIRE_EMAIL_CONFIRMATION", False), label=_("E-mail address"))

    class Meta:
        model = User
        fields = ("username","email",)

    def clean_email(self):
        """
        Verify that the email is not already taken or pending confirmation.
        """
        email = self.cleaned_data.get("email")
        if not email: return email
        # EAFP flow: only DoesNotExist is caught, so the ValidationErrors
        # raised after a successful lookup propagate to the form as intended.
        try:
            User.objects.get(email__iexact=email)
            raise forms.ValidationError(_("That e-mail is already used."))
        except User.DoesNotExist:
            try:
                EmailValidation.objects.get(email=email)
                raise forms.ValidationError(_("That e-mail is already being confirmed."))
            except EmailValidation.DoesNotExist:
                return email

    def save(self, *args, **kwargs):
        """Create the user; inactive until confirmed when confirmation is required."""
        user = super(RegistrationForm, self).save(commit=False)
        user.is_active = not getattr(settings, "REQUIRE_EMAIL_CONFIRMATION", False)
        user.save()
        # Kick off the confirmation e-mail when an address was supplied.
        if self.cleaned_data.get('email'):
            EmailValidation.objects.add(user=user, email=user.email)
        return user
# Allow projects to substitute their own registration form via
# settings.REGISTRATION_FORM = "dotted.path.FormClass"; fall back to ours
# when the setting is absent or the class cannot be imported.
try:
    _temp = settings.REGISTRATION_FORM.split('.')
    _form_class = _temp.pop()
    _module = __import__('.'.join(_temp), globals(), locals(), [_form_class])
    _RegistrationForm = getattr(_module, _form_class)
except Exception:
    # was a bare 'except:'; Exception keeps the broad fallback behaviour
    # without swallowing SystemExit/KeyboardInterrupt
    _RegistrationForm = RegistrationForm
class EmailValidationForm(forms.Form):
    """Form for attaching a new e-mail address to an account."""
    email = forms.EmailField()

    def clean_email(self):
        """
        Verify the address is neither owned by a user nor awaiting confirmation.
        """
        email = self.cleaned_data.get("email")
        if not (User.objects.filter(email=email) or EmailValidation.objects.filter(email=email)):
            return email
        raise forms.ValidationError(_("That e-mail is already used."))
class ResendEmailValidationForm(forms.Form):
    """Form for re-sending a confirmation mail to a known address."""
    email = forms.EmailField()

    def clean_email(self):
        """
        Verify the address is known (owned by a user or pending confirmation) -
        the inverse check of EmailValidationForm.
        """
        email = self.cleaned_data.get("email")
        if User.objects.filter(email=email) or EmailValidation.objects.filter(email=email):
            return email
        raise forms.ValidationError(_("That e-mail isn't registered."))
| bsd-2-clause |
Stavitsky/neutron | neutron/tests/functional/sanity/test_sanity.py | 16 | 2331 | # Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.cmd.sanity import checks
from neutron.tests import base
from neutron.tests.functional import base as functional_base
class SanityTestCase(base.BaseTestCase):
    """Sanity checks that do not require root access.

    Tests that just call checks.some_function() are to ensure that
    neutron-sanity-check runs without throwing an exception, as in the case
    where someone modifies the API without updating the check script.
    """
    # The previous setUp() only called super() and added nothing, so it
    # has been removed.

    def test_nova_notify_runs(self):
        checks.nova_notify_supported()

    def test_dnsmasq_version(self):
        checks.dnsmasq_version_supported()
class SanityTestCaseRoot(functional_base.BaseSudoTestCase):
    """Sanity checks that require root access.

    Tests that just call checks.some_function() are to ensure that
    neutron-sanity-check runs without throwing an exception, as in the case
    where someone modifies the API without updating the check script.
    """

    def test_ovs_vxlan_support_runs(self):
        checks.ovs_vxlan_supported()

    def test_iproute2_vxlan_support_runs(self):
        checks.iproute2_vxlan_supported()

    def test_ovs_patch_support_runs(self):
        checks.patch_supported()

    def test_arp_responder_runs(self):
        checks.arp_responder_supported()

    def test_arp_header_match_runs(self):
        checks.arp_header_match_supported()

    def test_vf_management_runs(self):
        checks.vf_management_supported()

    def test_namespace_root_read_detection_runs(self):
        checks.netns_read_requires_helper()

    def test_ovsdb_native_supported_runs(self):
        checks.ovsdb_native_supported()
| apache-2.0 |
HyperBaton/ansible | lib/ansible/modules/network/onyx/onyx_syslog_files.py | 9 | 9770 | #!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: onyx_syslog_files
version_added: "2.10"
author: "Anas Shami (@anass)"
short_description: Configure file management syslog module
description:
- This module provides declarative management of syslog
on Mellanox ONYX network devices.
notes:
options:
debug:
description:
- Configure settings for debug log files
type: bool
default: False
delete_group:
description:
- Delete certain log files
choices: ['current', 'oldest']
type: str
rotation:
description:
- rotation related attributes
type: dict
suboptions:
frequency:
description:
- Rotate log files on a fixed time-based schedule
choices: ['daily', 'weekly', 'monthly']
type: str
force:
description:
- force an immediate rotation of log files
type: bool
max_num:
description:
- Sepcify max_num of old log files to keep
type: int
size:
description:
- Rotate files when they pass max size
type: float
size_pct:
description:
- Rotatoe files when they pass percent of HD
type: float
upload_url:
description:
- upload local log files to remote host (ftp, scp, sftp, tftp) with format protocol://username[:password]@server/path
type: str
upload_file:
description:
- Upload compressed log file (current or filename)
type: str
"""
EXAMPLES = """
- name: syslog delete old files
- onyx_syslog_files:
delete_group: oldest
- name: syslog upload file
- onyx_syslog_files:
upload_url: scp://username:password@hostnamepath/filename
upload_file: current
- name: syslog rotation force, frequency and max number
- onyx_syslog_files:
rotation:
force: true
max_num: 30
frequency: daily
size: 128
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device.
returned: always
type: list
sample:
- logging files delete current
- logging files rotate criteria
- logging files upload current url
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.onyx.onyx import show_cmd
from ansible.module_utils.network.onyx.onyx import BaseOnyxModule
class OnyxSyslogFilesModule(BaseOnyxModule):
    """Manage syslog log-file rotation, upload and deletion on ONYX devices.

    Compares the requested state against the device's current configuration
    and emits the "logging files ..." / "logging debug-files ..." commands
    needed to reconcile them.
    """

    # Device-side upper bound for the "rotation max-num" setting.
    MAX_FILES = 999999
    # Matches "protocol://username:password@server:/path" upload URLs.
    # The alternation now covers every protocol listed in the module docs and
    # in the validation error message; the original pattern offered "ftps"
    # but omitted "sftp" and "tftp".
    URL_REGEX = re.compile(
        r'^(ftp|ftps|scp|sftp|tftp):\/\/[a-z0-9\.]*:(.*)@(.*):([a-zA-Z\/\/])*$')
    # NOTE: misspelled name kept for backward compatibility with any
    # external references.
    FREQUANCIES = ['daily', 'weekly', 'monthly']
    ROTATION_KEYS = ['frequency', 'max_num', 'size', 'size_pct', 'force']
    # CLI templates per rotation option; {0} is "files" or "debug-files",
    # {1} is the option's value ("force" takes no value).
    ROTATION_CMDS = {'size': 'logging {0} rotation criteria size {1}',
                     'frequency': 'logging {0} rotation criteria frequency {1}',
                     'max_num': 'logging {0} rotation max-num {1}',
                     'size_pct': 'logging {0} rotation criteria size-pct {1}',
                     'force': 'logging {0} rotation force'}

    def init_module(self):
        """Ansible module initialization: declare the argument spec."""
        rotation_spec = dict(frequency=dict(choices=self.FREQUANCIES),
                             max_num=dict(type="int"),
                             force=dict(type="bool"),
                             size=dict(type="float"),
                             size_pct=dict(type="float"))
        element_spec = dict(delete_group=dict(choices=['oldest', 'current']),
                            rotation=dict(type="dict", options=rotation_spec),
                            upload_file=dict(type="str"),
                            upload_url=dict(type="str"),
                            debug=dict(type="bool", default=False))
        argument_spec = dict()
        argument_spec.update(element_spec)
        self._module = AnsibleModule(
            argument_spec=argument_spec,
            supports_check_mode=True,
            required_together=[['upload_file', 'upload_url']])

    def validate_rotation(self, rotation):
        """Fail the module run when rotation values are out of range.

        size_pct must lie in [0, 100]; max_num in [0, MAX_FILES].
        """
        size_pct = rotation.get('size_pct', None)
        max_num = rotation.get('max_num', None)
        if size_pct is not None and (float(size_pct) < 0 or float(size_pct) > 100):
            self._module.fail_json(
                msg='logging size_pct must be in range 0-100')
        elif max_num is not None and (int(max_num) < 0 or int(max_num) > self.MAX_FILES):
            self._module.fail_json(
                msg='logging max_num must be positive number less than {0}'.format(self.MAX_FILES))

    def validate_upload_url(self, upload_url):
        """Fail the module run when the upload URL has an unexpected shape.

        The regex is only applied to a non-empty URL, so a missing value no
        longer raises TypeError inside re.match (the original matched first
        and checked truthiness afterwards).
        """
        if upload_url and not self.URL_REGEX.match(upload_url):
            self._module.fail_json(
                msg='Invalid url, make sure that you use "[ftp, scp, tftp, sftp]://username:password@hostname:/location" format')

    def show_logging(self):
        """Return "show logging" output merged with debug-files config lines.

        The raw running-config lines for "logging ... debug-files" are
        attached under the 'debug' key of the first result entry.
        """
        show_logging = show_cmd(self._module, "show logging", json_fmt=True, fail_on_error=False)
        running_config = show_cmd(self._module, "show running-config | include .*logging.*debug-files.*", json_fmt=True, fail_on_error=False)
        if len(show_logging) > 0:
            show_logging[0]['debug'] = running_config['Lines'] if 'Lines' in running_config else []
        else:
            show_logging = [{
                'debug': running_config['Lines'] if 'Lines' in running_config else []
            }]
        return show_logging

    def load_current_config(self):
        """Parse device output into self._current_config.

        Populates 'frequency', 'size', 'size_pct', 'max_num' from the
        "show logging" summary, and their 'debug_'-prefixed twins from the
        raw debug-files running-config lines.
        """
        self._current_config = dict()
        current_config = self.show_logging()[0]
        freq = current_config.get('Log rotation frequency')  # e.g. "daily (Once per day at midnight)"
        size = current_config.get('Log rotation size threshold')  # e.g. "19.07 megabytes or 10.000% of partition (987.84 megabytes)"
        max_num = current_config.get('Number of archived log files to keep')
        if freq is not None:
            freq_str = freq.split()[0]
            self._current_config['frequency'] = freq_str
        if size is not None:
            size_arr = size.split(' ')
            if '%' in size:
                # Percent form carries both size-pct (first token) and the
                # absolute size (second-to-last token, wrapped in
                # parentheses / "megabytes").
                size_pct_value = size_arr[0].replace('%', '')
                self._current_config['size_pct'] = float(size_pct_value)
                size_value = re.sub(r'(\(|\)|megabytes)', '', size_arr[-2]).strip()
                self._current_config['size'] = float(size_value)
            else:
                size_value = size_arr[0]
                self._current_config['size'] = float(size_value)
        if max_num is not None:
            self._current_config['max_num'] = int(max_num)
        # Debug-files parameters come from raw running-config lines.
        # 'size-pct' must be tested before 'size': a size-pct line also
        # contains the substring 'size' and was previously misparsed into
        # 'debug_size'.
        for line in current_config['debug']:
            if 'size-pct' in line:
                self._current_config['debug_size_pct'] = float(line.split(' ')[-1])
            elif 'size' in line:
                self._current_config['debug_size'] = float(line.split(' ')[-1])
            elif 'frequency' in line:
                self._current_config['debug_frequency'] = line.split(' ')[-1]
            elif 'max-num' in line:
                self._current_config['debug_max_num'] = int(line.split(' ')[-1])

    def get_required_config(self):
        """Collect the requested state from module params into
        self._required_config, validating values along the way."""
        self._required_config = dict()
        required_config = dict()
        module_params = self._module.params
        delete_group = module_params.get('delete_group')
        upload_file = module_params.get('upload_file')
        rotation = module_params.get('rotation')
        if delete_group:
            required_config['delete_group'] = delete_group
        if upload_file:
            required_config.update({'upload_file': upload_file,
                                    'upload_url': module_params.get('upload_url')})
        if rotation:
            required_config['rotation'] = rotation
        required_config['debug'] = module_params['debug']
        self.validate_param_values(required_config)
        self._required_config = required_config

    def generate_commands(self):
        """Emit commands for every requested setting that differs from the
        device's current configuration."""
        required_config = self._required_config
        current_config = self._current_config
        logging_files_type = 'debug-files' if required_config['debug'] else 'files'
        debug_prefix = 'debug_' if required_config['debug'] else ''
        rotation = required_config.get('rotation')
        if rotation:
            for key in rotation:
                # Only emit a command when the value is set and differs from
                # the device's current value ('force' has no stored state).
                if rotation.get(key) and current_config.get(debug_prefix + key) != rotation.get(key):
                    cmd = self.ROTATION_CMDS[key].format(logging_files_type, rotation[key]) if key != 'force' else\
                        self.ROTATION_CMDS[key].format(logging_files_type)
                    self._commands.append(cmd)
        delete_group = required_config.get('delete_group')
        if delete_group:
            self._commands.append('logging {0} delete {1}'.format(logging_files_type,
                                                                  delete_group))
        upload_file = required_config.get('upload_file')
        if upload_file:
            self._commands.append('logging {0} upload {1} {2}'.format(logging_files_type,
                                                                      upload_file, required_config.get('upload_url')))
def main():
    """ main entry point for module execution
    """
    # main() on the class comes from BaseOnyxModule -- presumably it
    # instantiates the module and runs it; confirm in the onyx base module.
    OnyxSyslogFilesModule.main()


if __name__ == '__main__':
    main()
| gpl-3.0 |
auth0/auth0-python | auth0/v3/management/roles.py | 1 | 6446 | from .rest import RestClient
class Roles(object):
    """Client for the Auth0 Management API v2 roles endpoints.

    Args:
        domain (str): Your Auth0 domain, e.g: 'username.auth0.com'
        token (str): Management API v2 Token
        telemetry (bool, optional): Enable or disable Telemetry
            (defaults to True)
        timeout (float or tuple, optional): Change the requests
            connect and read timeout. Pass a tuple to specify
            both values separately or a float to set both to it.
            (defaults to 5.0 for both)
    """

    def __init__(self, domain, token, telemetry=True, timeout=5.0, protocol="https"):
        self.domain = domain
        self.protocol = protocol
        self.client = RestClient(jwt=token, telemetry=telemetry, timeout=timeout)

    def _url(self, id=None):
        """Return the roles collection URL, or a single role's URL when
        *id* is given."""
        endpoint = '{}://{}/api/v2/roles'.format(self.protocol, self.domain)
        return endpoint if id is None else '{}/{}'.format(endpoint, id)

    def list(self, page=0, per_page=25, include_totals=True, name_filter=None):
        """Retrieve a page of roles, optionally filtered by name.

        Args:
            page (int, optional): Zero-based page number (defaults to 0,
                the first page).
            per_page (int, optional): Number of entries per page
                (defaults to 25).
            include_totals (bool, optional): Whether the query summary is
                included in the result. Defaults to True.
            name_filter (str, optional): Case-insensitive filter applied
                to role names.

        See: https://auth0.com/docs/api/management/v2#!/Roles/get_roles
        """
        query = {
            'per_page': per_page,
            'page': page,
            'include_totals': str(include_totals).lower(),
            'name_filter': name_filter,
        }
        return self.client.get(self._url(), params=query)

    def create(self, body):
        """Create a role from the attribute dict *body*.

        See: https://auth0.com/docs/api/v2#!/Roles/post_roles
        """
        return self.client.post(self._url(), data=body)

    def get(self, id):
        """Fetch the role identified by *id*.

        See: https://auth0.com/docs/api/management/v2#!/Roles/get_roles_by_id
        """
        return self.client.get(self._url(id))

    def delete(self, id):
        """Remove the role identified by *id*.

        See: https://auth0.com/docs/api/management/v2#!/Roles/delete_roles_by_id
        """
        return self.client.delete(self._url(id))

    def update(self, id, body):
        """Patch the role *id* with the attributes in *body*.

        See: https://auth0.com/docs/api/management/v2#!/Roles/patch_roles_by_id
        """
        return self.client.patch(self._url(id), data=body)

    def list_users(self, id, page=0, per_page=25, include_totals=True):
        """List the users assigned to role *id* (paginated).

        Args:
            id (str): The role's id.
            page (int, optional): Zero-based page number (defaults to 0).
            per_page (int, optional): Number of entries per page
                (defaults to 25).
            include_totals (bool, optional): Whether the query summary is
                included in the result. Defaults to True.

        See https://auth0.com/docs/api/management/v2#!/Roles/get_role_user
        """
        query = {
            'per_page': per_page,
            'page': page,
            'include_totals': str(include_totals).lower(),
        }
        return self.client.get(self._url('{}/users'.format(id)), params=query)

    def add_users(self, id, users):
        """Assign the user ids in *users* to role *id*.

        See https://auth0.com/docs/api/management/v2#!/Roles/post_role_users
        """
        return self.client.post(self._url('{}/users'.format(id)),
                                data={'users': users})

    def list_permissions(self, id, page=0, per_page=25, include_totals=True):
        """List the permissions attached to role *id* (paginated).

        Args:
            id (str): The role's id.
            page (int, optional): Zero-based page number (defaults to 0).
            per_page (int, optional): Number of entries per page
                (defaults to 25).
            include_totals (bool, optional): Whether the query summary is
                included in the result. Defaults to True.

        See https://auth0.com/docs/api/management/v2#!/Roles/get_role_permission
        """
        query = {
            'per_page': per_page,
            'page': page,
            'include_totals': str(include_totals).lower(),
        }
        return self.client.get(self._url('{}/permissions'.format(id)), params=query)

    def remove_permissions(self, id, permissions):
        """Detach the permission ids in *permissions* from role *id*.

        See https://auth0.com/docs/api/management/v2#!/Roles/delete_role_permission_assignment
        """
        return self.client.delete(self._url('{}/permissions'.format(id)),
                                  data={'permissions': permissions})

    def add_permissions(self, id, permissions):
        """Attach the permission ids in *permissions* to role *id*.

        See https://auth0.com/docs/api/management/v2#!/Roles/post_role_permission_assignment
        """
        return self.client.post(self._url('{}/permissions'.format(id)),
                                data={'permissions': permissions})
| mit |
jaj42/GraPhysio | graphysio/ui/filter.py | 2 | 1823 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'filter.ui'
#
# Created by: PyQt5 UI code generator 5.12.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Filter(object):
    # Auto-generated by pyuic5 5.12.3 from 'filter.ui'; regenerate from the
    # .ui file instead of editing here -- manual changes are lost.

    def setupUi(self, Filter):
        """Create and lay out the widgets of the Filter dialog."""
        Filter.setObjectName("Filter")
        Filter.resize(387, 371)
        # Main vertical layout: parameter table, checkbox, then button row.
        self.verticalLayout = QtWidgets.QVBoxLayout(Filter)
        self.verticalLayout.setObjectName("verticalLayout")
        # Two-column table, initially empty.
        self.table = QtWidgets.QTableWidget(Filter)
        self.table.setColumnCount(2)
        self.table.setObjectName("table")
        self.table.setRowCount(0)
        self.verticalLayout.addWidget(self.table)
        # "Create new curve" option (labelled in retranslateUi).
        self.chkNewcurve = QtWidgets.QCheckBox(Filter)
        self.chkNewcurve.setObjectName("chkNewcurve")
        self.verticalLayout.addWidget(self.chkNewcurve)
        # Ok / Cancel button row.
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.okButton = QtWidgets.QPushButton(Filter)
        self.okButton.setObjectName("okButton")
        self.horizontalLayout.addWidget(self.okButton)
        self.cancelButton = QtWidgets.QPushButton(Filter)
        self.cancelButton.setObjectName("cancelButton")
        self.horizontalLayout.addWidget(self.cancelButton)
        self.verticalLayout.addLayout(self.horizontalLayout)

        self.retranslateUi(Filter)
        QtCore.QMetaObject.connectSlotsByName(Filter)

    def retranslateUi(self, Filter):
        """Assign the user-visible strings (translated at runtime)."""
        _translate = QtCore.QCoreApplication.translate
        Filter.setWindowTitle(_translate("Filter", "Filter"))
        self.chkNewcurve.setText(_translate("Filter", "Create new curve"))
        self.okButton.setText(_translate("Filter", "Ok"))
        self.cancelButton.setText(_translate("Filter", "Cancel"))
| isc |
chrisfilda/edx_platform | common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py | 10 | 5756 | import json
import logging
from dogapi import dog_stats_api
from .grading_service_module import GradingService, GradingServiceError
# Module-level logger, namespaced to this module's import path.
log = logging.getLogger(__name__)
class PeerGradingService(GradingService):
    """
    Interface with the grading controller for peer grading.

    Wraps the controller's HTTP endpoints and records a metric for every
    call via _record_result (metric namespace: METRIC_NAME).
    """
    METRIC_NAME = 'edxapp.open_ended_grading.peer_grading_service'

    def __init__(self, config, system):
        config['system'] = system
        super(PeerGradingService, self).__init__(config)
        # Endpoint URLs, all rooted at the controller's peer_grading path.
        self.url = config['url'] + config['peer_grading']
        self.login_url = self.url + '/login/'
        self.get_next_submission_url = self.url + '/get_next_submission/'
        self.save_grade_url = self.url + '/save_grade/'
        self.is_student_calibrated_url = self.url + '/is_student_calibrated/'
        self.show_calibration_essay_url = self.url + '/show_calibration_essay/'
        self.save_calibration_essay_url = self.url + '/save_calibration_essay/'
        self.get_problem_list_url = self.url + '/get_problem_list/'
        self.get_notifications_url = self.url + '/get_notifications/'
        self.get_data_for_location_url = self.url + '/get_data_for_location/'
        self.system = system

    def get_data_for_location(self, problem_location, student_id):
        """Fetch peer-grading counts for a student at a problem location."""
        params = {'location': problem_location, 'student_id': student_id}
        result = self.get(self.get_data_for_location_url, params)
        self._record_result('get_data_for_location', result)
        # Export every payload field (anything besides the bookkeeping keys)
        # as a histogram metric.
        for key in result.keys():
            if key in ('success', 'error', 'version'):
                continue
            dog_stats_api.histogram(
                self._metric_name('get_data_for_location.{}'.format(key)),
                result[key],
            )
        return result

    def get_next_submission(self, problem_location, grader_id):
        """Fetch the next submission for the grader, with rendered rubric."""
        result = self._render_rubric(self.get(
            self.get_next_submission_url,
            {
                'location': problem_location,
                'grader_id': grader_id
            }
        ))
        self._record_result('get_next_submission', result)
        return result

    def save_grade(self, **kwargs):
        """Post a grader's scores; rubric scores are always marked complete."""
        data = kwargs
        data.update({'rubric_scores_complete': True})
        result = self.post(self.save_grade_url, data)
        self._record_result('save_grade', result)
        return result

    def is_student_calibrated(self, problem_location, grader_id):
        """Ask the controller whether the grader has finished calibration."""
        params = {'problem_id': problem_location, 'student_id': grader_id}
        result = self.get(self.is_student_calibrated_url, params)
        self._record_result(
            'is_student_calibrated',
            result,
            tags=['calibrated:{}'.format(result.get('calibrated'))]
        )
        return result

    def show_calibration_essay(self, problem_location, grader_id):
        """Fetch a calibration essay for the grader, with rendered rubric."""
        params = {'problem_id': problem_location, 'student_id': grader_id}
        result = self._render_rubric(self.get(self.show_calibration_essay_url, params))
        self._record_result('show_calibration_essay', result)
        return result

    def save_calibration_essay(self, **kwargs):
        """Post a grader's calibration scores."""
        data = kwargs
        data.update({'rubric_scores_complete': True})
        result = self.post(self.save_calibration_essay_url, data)
        # Fixed copy/paste bug: this call previously recorded its metric as
        # 'show_calibration_essay', conflating saves with views.
        self._record_result('save_calibration_essay', result)
        return result

    def get_problem_list(self, course_id, grader_id):
        """Fetch the list of peer-gradable problems in a course."""
        params = {'course_id': course_id, 'student_id': grader_id}
        result = self.get(self.get_problem_list_url, params)
        self._record_result('get_problem_list', result)
        dog_stats_api.histogram(
            self._metric_name('get_problem_list.result.length'),
            len(result.get('problem_list', []))
        )
        return result

    def get_notifications(self, course_id, grader_id):
        """Fetch whether the student still needs to peer grade in a course."""
        params = {'course_id': course_id, 'student_id': grader_id}
        result = self.get(self.get_notifications_url, params)
        self._record_result(
            'get_notifications',
            result,
            tags=['needs_to_peer_grade:{}'.format(result.get('student_needs_to_peer_grade'))]
        )
        return result
# A stand-in for PeerGradingService used by unit tests: every method returns
# a canned response, so no calls reach the grading controller.
class MockPeerGradingService(object):

    def get_next_submission(self, problem_location, grader_id):
        # Canned "next submission" payload with a full set of fields.
        payload = {
            'success': True,
            'submission_id': 1,
            'submission_key': '',
            'student_response': 'Sample student response.',
            'prompt': 'Sample submission prompt.',
            'rubric': 'Placeholder text for the full rubric.',
            'max_score': 4,
        }
        return payload

    def save_grade(self, **kwargs):
        return {'success': True}

    def is_student_calibrated(self, problem_location, grader_id):
        return {'success': True, 'calibrated': True}

    def show_calibration_essay(self, problem_location, grader_id):
        # Same canned payload as a regular submission.
        return self.get_next_submission(problem_location, grader_id)

    def save_calibration_essay(self, **kwargs):
        return {'success': True, 'actual_score': 2}

    def get_problem_list(self, course_id, grader_id):
        return {'success': True, 'problem_list': []}

    def get_data_for_location(self, problem_location, student_id):
        return {
            'version': 1,
            'count_graded': 3,
            'count_required': 3,
            'success': True,
            'student_sub_count': 1,
            'submissions_available': 0,
        }
| agpl-3.0 |
sallaire/Sick-Beard | sickbeard/history.py | 35 | 2768 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import db
import datetime
from sickbeard.common import SNATCHED, SUBTITLED, Quality
# strftime pattern used for history-table timestamps, e.g. "20240131235959".
dateFormat = "%Y%m%d%H%M%S"
def _logHistoryItem(action, showid, season, episode, quality, resource, provider):
    """Insert one row into the history table, stamped with the current time."""
    when = datetime.datetime.today().strftime(dateFormat)
    connection = db.DBConnection()
    connection.action(
        "INSERT INTO history (action, date, showid, season, episode, quality, resource, provider) VALUES (?,?,?,?,?,?,?,?)",
        [action, when, showid, season, episode, quality, resource, provider])
def logSnatch(searchResult):
    """Record a SNATCHED history row for every episode in a search result.

    The composite action encodes both the SNATCHED status and the result's
    quality; the search result's name is stored as the resource.
    """
    for curEpObj in searchResult.episodes:
        showid = int(curEpObj.show.tvdbid)
        season = int(curEpObj.season)
        episode = int(curEpObj.episode)
        quality = searchResult.quality

        # Fall back to a placeholder when the result has no provider
        # attached ("is not None" replaces the non-idiomatic "!= None").
        providerClass = searchResult.provider
        if providerClass is not None:
            provider = providerClass.name
        else:
            provider = "unknown"

        action = Quality.compositeStatus(SNATCHED, quality)
        resource = searchResult.name

        _logHistoryItem(action, showid, season, episode, quality, resource, provider)
def logDownload(episode, filename, new_ep_quality, release_group=None):
    """Record a completed-download history row for a single episode."""
    show_id = int(episode.show.tvdbid)
    season_num = int(episode.season)
    ep_num = int(episode.episode)

    # Store the release group in the provider column when available; -1
    # marks it unknown.
    # NOTE(review): logSnatch uses the string "unknown" for the same case --
    # confirm whether history-table consumers rely on the -1 sentinel.
    provider = release_group if release_group else -1

    _logHistoryItem(episode.status, show_id, season_num, ep_num,
                    new_ep_quality, filename, provider)
def logSubtitle(showid, season, episode, status, subtitleResult):
    """Record a SUBTITLED history row for a downloaded subtitle."""
    resource = subtitleResult.path
    provider = subtitleResult.service
    # Keep the episode's quality but swap the status part for SUBTITLED.
    _, quality = Quality.splitCompositeStatus(status)
    action = Quality.compositeStatus(SUBTITLED, quality)
    _logHistoryItem(action, showid, season, episode, quality, resource, provider)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.