| repo_name (string, 5–100 chars) | path (string, 4–375 chars) | copies (categorical, 991 classes) | size (string, 4–7 chars) | content (string, 666–1M chars) | license (categorical, 15 classes) |
|---|---|---|---|---|---|
mancoast/CPythonPyc_test | cpython/231_test_cfgparser.py | 9 | 11980 | import ConfigParser
import StringIO
import unittest
from test import test_support
class TestCaseBase(unittest.TestCase):
    """Shared helpers and tests for the ConfigParser family of classes.

    Concrete subclasses set ``config_class`` to the parser class under
    test (ConfigParser, RawConfigParser or SafeConfigParser).
    """

    def newconfig(self, defaults=None):
        # Create a fresh parser (kept on self.cf for helpers such as
        # parse_error/get_error) and return it.
        if defaults is None:
            self.cf = self.config_class()
        else:
            self.cf = self.config_class(defaults)
        return self.cf

    def fromstring(self, string, defaults=None):
        # Parse *string* as if it were a config file and return the parser.
        cf = self.newconfig(defaults)
        sio = StringIO.StringIO(string)
        cf.readfp(sio)
        return cf

    def test_basic(self):
        # Cover '=' and ':' delimiters, inline comments, continuation
        # lines, odd section names, and option names containing spaces.
        cf = self.fromstring(
            "[Foo Bar]\n"
            "foo=bar\n"
            "[Spacey Bar]\n"
            "foo = bar\n"
            "[Commented Bar]\n"
            "foo: bar ; comment\n"
            "[Long Line]\n"
            "foo: this line is much, much longer than my editor\n"
            " likes it.\n"
            "[Section\\with$weird%characters[\t]\n"
            "[Internationalized Stuff]\n"
            "foo[bg]: Bulgarian\n"
            "foo=Default\n"
            "foo[en]=English\n"
            "foo[de]=Deutsch\n"
            "[Spaces]\n"
            "key with spaces : value\n"
            "another with spaces = splat!\n"
            )
        L = cf.sections()
        L.sort()
        eq = self.assertEqual
        eq(L, [r'Commented Bar',
               r'Foo Bar',
               r'Internationalized Stuff',
               r'Long Line',
               r'Section\with$weird%characters[' '\t',
               r'Spaces',
               r'Spacey Bar',
               ])
        # The use of spaces in the section names serves as a
        # regression test for SourceForge bug #583248:
        # http://www.python.org/sf/583248
        eq(cf.get('Foo Bar', 'foo'), 'bar')
        eq(cf.get('Spacey Bar', 'foo'), 'bar')
        eq(cf.get('Commented Bar', 'foo'), 'bar')
        eq(cf.get('Spaces', 'key with spaces'), 'value')
        eq(cf.get('Spaces', 'another with spaces'), 'splat!')
        self.failIf('__name__' in cf.options("Foo Bar"),
                    '__name__ "option" should not be exposed by the API!')
        # Make sure the right things happen for remove_option();
        # added to include check for SourceForge bug #123324:
        self.failUnless(cf.remove_option('Foo Bar', 'foo'),
                        "remove_option() failed to report existance of option")
        self.failIf(cf.has_option('Foo Bar', 'foo'),
                    "remove_option() failed to remove option")
        self.failIf(cf.remove_option('Foo Bar', 'foo'),
                    "remove_option() failed to report non-existance of option"
                    " that was removed")
        self.assertRaises(ConfigParser.NoSectionError,
                          cf.remove_option, 'No Such Section', 'foo')
        # Continuation lines are joined with a single newline and have
        # their leading whitespace stripped.
        eq(cf.get('Long Line', 'foo'),
           'this line is much, much longer than my editor\nlikes it.')

    def test_case_sensitivity(self):
        # Section names are case-sensitive; option names are not.
        cf = self.newconfig()
        cf.add_section("A")
        cf.add_section("a")
        L = cf.sections()
        L.sort()
        eq = self.assertEqual
        eq(L, ["A", "a"])
        cf.set("a", "B", "value")
        eq(cf.options("a"), ["b"])
        eq(cf.get("a", "b"), "value",
           "could not locate option, expecting case-insensitive option names")
        self.failUnless(cf.has_option("a", "b"))
        cf.set("A", "A-B", "A-B value")
        for opt in ("a-b", "A-b", "a-B", "A-B"):
            self.failUnless(
                cf.has_option("A", opt),
                "has_option() returned false for option which should exist")
        eq(cf.options("A"), ["a-b"])
        eq(cf.options("a"), ["b"])
        cf.remove_option("a", "B")
        eq(cf.options("a"), [])

        # SF bug #432369: option names are lower-cased but values keep
        # their continuation lines.
        cf = self.fromstring(
            "[MySection]\nOption: first line\n\tsecond line\n")
        eq(cf.options("MySection"), ["option"])
        eq(cf.get("MySection", "Option"), "first line\nsecond line")

        # SF bug #561822: defaults are matched case-insensitively too.
        cf = self.fromstring("[section]\nnekey=nevalue\n",
                             defaults={"key":"value"})
        self.failUnless(cf.has_option("section", "Key"))

    def test_parse_errors(self):
        # Malformed input must raise the appropriate parser exception.
        self.newconfig()
        self.parse_error(ConfigParser.ParsingError,
                         "[Foo]\n extra-spaces: splat\n")
        self.parse_error(ConfigParser.ParsingError,
                         "[Foo]\n extra-spaces= splat\n")
        self.parse_error(ConfigParser.ParsingError,
                         "[Foo]\noption-without-value\n")
        self.parse_error(ConfigParser.ParsingError,
                         "[Foo]\n:value-without-option-name\n")
        self.parse_error(ConfigParser.ParsingError,
                         "[Foo]\n=value-without-option-name\n")
        self.parse_error(ConfigParser.MissingSectionHeaderError,
                         "No Section!\n")

    def parse_error(self, exc, src):
        # Helper: feeding *src* to the current parser must raise *exc*.
        sio = StringIO.StringIO(src)
        self.assertRaises(exc, self.cf.readfp, sio)

    def test_query_errors(self):
        # Queries against unknown sections/options raise the right errors.
        cf = self.newconfig()
        self.assertEqual(cf.sections(), [],
                         "new ConfigParser should have no defined sections")
        self.failIf(cf.has_section("Foo"),
                    "new ConfigParser should have no acknowledged sections")
        self.assertRaises(ConfigParser.NoSectionError,
                          cf.options, "Foo")
        self.assertRaises(ConfigParser.NoSectionError,
                          cf.set, "foo", "bar", "value")
        self.get_error(ConfigParser.NoSectionError, "foo", "bar")
        cf.add_section("foo")
        self.get_error(ConfigParser.NoOptionError, "foo", "bar")

    def get_error(self, exc, section, option):
        # Helper: return the *exc* instance raised by cf.get(), failing
        # the test if no exception is raised at all.
        try:
            self.cf.get(section, option)
        except exc, e:
            return e
        else:
            self.fail("expected exception type %s.%s"
                      % (exc.__module__, exc.__name__))

    def test_boolean(self):
        # getboolean() accepts 1/TRUE/True/oN/yes and 0/FALSE/False/oFF/nO
        # (case-insensitively) and raises ValueError for anything else.
        cf = self.fromstring(
            "[BOOLTEST]\n"
            "T1=1\n"
            "T2=TRUE\n"
            "T3=True\n"
            "T4=oN\n"
            "T5=yes\n"
            "F1=0\n"
            "F2=FALSE\n"
            "F3=False\n"
            "F4=oFF\n"
            "F5=nO\n"
            "E1=2\n"
            "E2=foo\n"
            "E3=-1\n"
            "E4=0.1\n"
            "E5=FALSE AND MORE"
            )
        # NOTE(review): range(1, 5) exercises suffixes 1-4 only, so the
        # T5/F5/E5 options defined above are never tested -- looks like
        # an off-by-one; confirm before relying on those cases.
        for x in range(1, 5):
            self.failUnless(cf.getboolean('BOOLTEST', 't%d' % x))
            self.failIf(cf.getboolean('BOOLTEST', 'f%d' % x))
            self.assertRaises(ValueError,
                              cf.getboolean, 'BOOLTEST', 'e%d' % x)

    def test_weird_errors(self):
        # Adding the same section twice raises DuplicateSectionError.
        cf = self.newconfig()
        cf.add_section("Foo")
        self.assertRaises(ConfigParser.DuplicateSectionError,
                          cf.add_section, "Foo")

    def test_write(self):
        # write() output: DEFAULT section first, '=' as the delimiter,
        # continuation lines indented with a tab.
        cf = self.fromstring(
            "[Long Line]\n"
            "foo: this line is much, much longer than my editor\n"
            " likes it.\n"
            "[DEFAULT]\n"
            "foo: another very\n"
            " long line"
            )
        output = StringIO.StringIO()
        cf.write(output)
        self.assertEqual(
            output.getvalue(),
            "[DEFAULT]\n"
            "foo = another very\n"
            "\tlong line\n"
            "\n"
            "[Long Line]\n"
            "foo = this line is much, much longer than my editor\n"
            "\tlikes it.\n"
            "\n"
            )

    # shared by subclasses
    def get_interpolation_config(self):
        # A config exercising %()s interpolation at depths 1 through 11,
        # plus mutual recursion and a dangling reference.
        return self.fromstring(
            "[Foo]\n"
            "bar=something %(with1)s interpolation (1 step)\n"
            "bar9=something %(with9)s lots of interpolation (9 steps)\n"
            "bar10=something %(with10)s lots of interpolation (10 steps)\n"
            "bar11=something %(with11)s lots of interpolation (11 steps)\n"
            "with11=%(with10)s\n"
            "with10=%(with9)s\n"
            "with9=%(with8)s\n"
            "with8=%(with7)s\n"
            "with7=%(with6)s\n"
            "with6=%(with5)s\n"
            "with5=%(with4)s\n"
            "with4=%(with3)s\n"
            "with3=%(with2)s\n"
            "with2=%(with1)s\n"
            "with1=with\n"
            "\n"
            "[Mutual Recursion]\n"
            "foo=%(bar)s\n"
            "bar=%(foo)s\n"
            "\n"
            "[Interpolation Error]\n"
            "name=%(reference)s\n",
            # no definition for 'reference'
            defaults={"getname": "%(__name__)s"})

    def check_items_config(self, expected):
        # Helper shared by subclasses: items("section") of this fixed
        # config, sorted, must equal *expected*.
        cf = self.fromstring(
            "[section]\n"
            "name = value\n"
            "key: |%(name)s| \n"
            "getdefault: |%(default)s|\n"
            "getname: |%(__name__)s|",
            defaults={"default": "<default>"})
        L = list(cf.items("section"))
        L.sort()
        self.assertEqual(L, expected)
class ConfigParserTestCase(TestCaseBase):
    # Tests ConfigParser, which performs %()s value interpolation.
    config_class = ConfigParser.ConfigParser

    def test_interpolation(self):
        # Interpolation resolves up to 10 levels deep; an 11th level
        # raises InterpolationDepthError.
        cf = self.get_interpolation_config()
        eq = self.assertEqual
        eq(cf.get("Foo", "getname"), "Foo")
        eq(cf.get("Foo", "bar"), "something with interpolation (1 step)")
        eq(cf.get("Foo", "bar9"),
           "something with lots of interpolation (9 steps)")
        eq(cf.get("Foo", "bar10"),
           "something with lots of interpolation (10 steps)")
        self.get_error(ConfigParser.InterpolationDepthError, "Foo", "bar11")

    def test_interpolation_missing_value(self):
        # A %(reference)s with no matching option raises an
        # InterpolationError carrying section/option/reference details.
        cf = self.get_interpolation_config()
        e = self.get_error(ConfigParser.InterpolationError,
                           "Interpolation Error", "name")
        self.assertEqual(e.reference, "reference")
        self.assertEqual(e.section, "Interpolation Error")
        self.assertEqual(e.option, "name")

    def test_items(self):
        # items() returns fully interpolated values plus the defaults.
        self.check_items_config([('default', '<default>'),
                                 ('getdefault', '|<default>|'),
                                 ('getname', '|section|'),
                                 ('key', '|value|'),
                                 ('name', 'value')])
class RawConfigParserTestCase(TestCaseBase):
    # Tests RawConfigParser, which performs no value interpolation.
    config_class = ConfigParser.RawConfigParser

    def test_interpolation(self):
        """get() must return raw values, with no %()s expansion at all."""
        cf = self.get_interpolation_config()
        expectations = [
            ("getname", "%(__name__)s"),
            ("bar", "something %(with1)s interpolation (1 step)"),
            ("bar9", "something %(with9)s lots of interpolation (9 steps)"),
            ("bar10", "something %(with10)s lots of interpolation (10 steps)"),
            ("bar11", "something %(with11)s lots of interpolation (11 steps)"),
        ]
        for option, raw_value in expectations:
            self.assertEqual(cf.get("Foo", option), raw_value)

    def test_items(self):
        """items() must also report raw, uninterpolated values."""
        self.check_items_config([('default', '<default>'),
                                 ('getdefault', '|%(default)s|'),
                                 ('getname', '|%(__name__)s|'),
                                 ('key', '|%(name)s|'),
                                 ('name', 'value')])
class SafeConfigParserTestCase(ConfigParserTestCase):
    # Inherits the interpolating-parser tests but runs them against
    # SafeConfigParser, which additionally supports %% escaping.
    config_class = ConfigParser.SafeConfigParser

    def test_safe_interpolation(self):
        """A literal %% must survive interpolation at any depth."""
        # See http://www.python.org/sf/511737
        config_text = ("[section]\n"
                       "option1=xxx\n"
                       "option2=%(option1)s/xxx\n"
                       "ok=%(option1)s/%%s\n"
                       "not_ok=%(option2)s/%%s")
        cf = self.fromstring(config_text)
        self.assertEqual(cf.get("section", "ok"), "xxx/%s")
        self.assertEqual(cf.get("section", "not_ok"), "xxx/xxx/%s")
def test_main():
    """Run every ConfigParser test case through test_support."""
    test_case_classes = (
        ConfigParserTestCase,
        RawConfigParserTestCase,
        SafeConfigParserTestCase,
    )
    test_support.run_unittest(*test_case_classes)


if __name__ == "__main__":
    test_main()
| gpl-3.0 |
bsipocz/scikit-image | skimage/viewer/plugins/plotplugin.py | 43 | 2405 | import numpy as np
from ..qt import QtGui
from ..utils import new_plot
from ..utils.canvas import BlitManager, EventManager
from .base import Plugin
__all__ = ['PlotPlugin']
class PlotPlugin(Plugin):
    """Plugin for ImageViewer that contains a plot canvas.

    Base class for plugins that contain a Matplotlib plot canvas, which can,
    for example, display an image histogram.

    See base Plugin class for additional details.
    """

    def __init__(self, image_filter=None, height=150, width=400, **kwargs):
        super(PlotPlugin, self).__init__(image_filter=image_filter,
                                         height=height, width=width, **kwargs)
        # Figure size in pixels; converted to inches in add_plot().
        self._height = height
        self._width = width
        self._blit_manager = None
        self._tools = []
        self._event_manager = None

    def attach(self, image_viewer):
        # Called when the plugin is attached to a viewer: build the plot
        # canvas and, if the viewer supports blitting, a BlitManager for
        # fast artist redraws; the EventManager is always created.
        super(PlotPlugin, self).attach(image_viewer)
        # Add plot for displaying intensity profile.
        self.add_plot()
        if image_viewer.useblit:
            self._blit_manager = BlitManager(self.ax)
        self._event_manager = EventManager(self.ax)

    def redraw(self):
        """Redraw plot."""
        self.canvas.draw_idle()

    def add_plot(self):
        # Build the Matplotlib figure/axes and embed the canvas into the
        # plugin's Qt layout, matching the Qt palette background colour.
        self.fig, self.ax = new_plot()
        self.fig.set_figwidth(self._width / float(self.fig.dpi))
        self.fig.set_figheight(self._height / float(self.fig.dpi))
        self.canvas = self.fig.canvas
        #TODO: Converted color is slightly different than Qt background.
        qpalette = QtGui.QPalette()
        qcolor = qpalette.color(QtGui.QPalette.Window)
        bgcolor = qcolor.toRgb().value()
        if np.isscalar(bgcolor):
            # Grayscale value: Matplotlib wants a '0'..'1' string here.
            bgcolor = str(bgcolor / 255.)
        self.fig.patch.set_facecolor(bgcolor)
        self.layout.addWidget(self.canvas, self.row, 0)

    def _update_original_image(self, image):
        # Keep the plot in sync when the viewer's base image changes.
        super(PlotPlugin, self)._update_original_image(image)
        self.redraw()

    def add_tool(self, tool):
        # Register an interactive tool with the blit and event managers.
        if self._blit_manager:
            self._blit_manager.add_artists(tool.artists)
        self._tools.append(tool)
        self._event_manager.attach(tool)

    def remove_tool(self, tool):
        # Detach *tool*; silently a no-op if it was never registered.
        if tool not in self._tools:
            return
        if self._blit_manager:
            self._blit_manager.remove_artists(tool.artists)
        self._tools.remove(tool)
        self._event_manager.detach(tool)
| bsd-3-clause |
firerszd/kbengine | kbe/src/lib/python/Lib/distutils/command/bdist.py | 85 | 5488 | """distutils.command.bdist
Implements the Distutils 'bdist' command (create a built [binary]
distribution)."""
import os
from distutils.core import Command
from distutils.errors import *
from distutils.util import get_platform
def show_formats():
    """Print list of available formats (arguments to "--format" option).
    """
    from distutils.fancy_getopt import FancyGetopt
    # Build one (long-option, short-option, help-text) row per format,
    # in the preferred display order defined on the bdist class.
    format_rows = [("formats=" + fmt, None, bdist.format_command[fmt][1])
                   for fmt in bdist.format_commands]
    pretty_printer = FancyGetopt(format_rows)
    pretty_printer.print_help("List of available distribution formats:")
class bdist(Command):
    """Distutils command that dispatches to the format-specific
    bdist_* sub-commands (bdist_dumb, bdist_rpm, bdist_wininst, ...)."""

    description = "create a built (binary) distribution"

    # Command-line options accepted by 'setup.py bdist'.
    user_options = [('bdist-base=', 'b',
                     "temporary directory for creating built distributions"),
                    ('plat-name=', 'p',
                     "platform name to embed in generated filenames "
                     "(default: %s)" % get_platform()),
                    ('formats=', None,
                     "formats for distribution (comma-separated list)"),
                    ('dist-dir=', 'd',
                     "directory to put final built distributions in "
                     "[default: dist]"),
                    ('skip-build', None,
                     "skip rebuilding everything (for testing/debugging)"),
                    ('owner=', 'u',
                     "Owner name used when creating a tar file"
                     " [default: current user]"),
                    ('group=', 'g',
                     "Group name used when creating a tar file"
                     " [default: current group]"),
                   ]

    boolean_options = ['skip-build']

    help_options = [
        ('help-formats', None,
         "lists available distribution formats", show_formats),
        ]

    # The following commands do not take a format option from bdist
    no_format_option = ('bdist_rpm',)

    # This won't do in reality: will need to distinguish RPM-ish Linux,
    # Debian-ish Linux, Solaris, FreeBSD, ..., Windows, Mac OS.
    default_format = {'posix': 'gztar',
                      'nt': 'zip'}

    # Establish the preferred order (for the --help-formats option).
    format_commands = ['rpm', 'gztar', 'bztar', 'ztar', 'tar',
                       'wininst', 'zip', 'msi']

    # And the real information: format name -> (sub-command, help text).
    format_command = {'rpm': ('bdist_rpm', "RPM distribution"),
                      'gztar': ('bdist_dumb', "gzip'ed tar file"),
                      'bztar': ('bdist_dumb', "bzip2'ed tar file"),
                      'ztar': ('bdist_dumb', "compressed tar file"),
                      'tar': ('bdist_dumb', "tar file"),
                      'wininst': ('bdist_wininst',
                                  "Windows executable installer"),
                      'zip': ('bdist_dumb', "ZIP file"),
                      'msi': ('bdist_msi', "Microsoft Installer")
                      }

    def initialize_options(self):
        # All options start unset; finalize_options() fills in defaults.
        self.bdist_base = None
        self.plat_name = None
        self.formats = None
        self.dist_dir = None
        self.skip_build = 0
        self.group = None
        self.owner = None

    def finalize_options(self):
        # have to finalize 'plat_name' before 'bdist_base'
        if self.plat_name is None:
            if self.skip_build:
                self.plat_name = get_platform()
            else:
                self.plat_name = self.get_finalized_command('build').plat_name

        # 'bdist_base' -- parent of per-built-distribution-format
        # temporary directories (eg. we'll probably have
        # "build/bdist.<plat>/dumb", "build/bdist.<plat>/rpm", etc.)
        if self.bdist_base is None:
            build_base = self.get_finalized_command('build').build_base
            self.bdist_base = os.path.join(build_base,
                                           'bdist.' + self.plat_name)

        self.ensure_string_list('formats')
        if self.formats is None:
            # No explicit --formats: pick the platform default.
            try:
                self.formats = [self.default_format[os.name]]
            except KeyError:
                raise DistutilsPlatformError(
                      "don't know how to create built distributions "
                      "on platform %s" % os.name)

        if self.dist_dir is None:
            self.dist_dir = "dist"

    def run(self):
        # Figure out which sub-commands we need to run.
        commands = []
        for format in self.formats:
            try:
                commands.append(self.format_command[format][0])
            except KeyError:
                raise DistutilsOptionError("invalid format '%s'" % format)

        # Reinitialize and run each command.
        for i in range(len(self.formats)):
            cmd_name = commands[i]
            sub_cmd = self.reinitialize_command(cmd_name)
            if cmd_name not in self.no_format_option:
                sub_cmd.format = self.formats[i]

            # passing the owner and group names for tar archiving
            if cmd_name == 'bdist_dumb':
                sub_cmd.owner = self.owner
                sub_cmd.group = self.group

            # If we're going to need to run this command again, tell it to
            # keep its temporary files around so subsequent runs go faster.
            if cmd_name in commands[i+1:]:
                sub_cmd.keep_temp = 1
            self.run_command(cmd_name)
| lgpl-3.0 |
nikolas/lettuce | tests/integration/lib/Django-1.2.5/tests/regressiontests/aggregation_regress/tests.py | 38 | 33851 | import datetime
import pickle
from decimal import Decimal
from operator import attrgetter
from django.conf import settings
from django.core.exceptions import FieldError
from django.db import DEFAULT_DB_ALIAS
from django.db.models import Count, Max, Avg, Sum, StdDev, Variance, F, Q
from django.test import TestCase, Approximate
from models import Author, Book, Publisher, Clues, Entries, HardbackBook
def run_stddev_tests():
    """Check to see if StdDev/Variance tests should be run.

    Stddev and Variance are not guaranteed to be available for SQLite, and
    are not available for PostgreSQL before 8.2.

    Returns True when the current backend accepts a STDDEV_POP aggregate,
    False otherwise.
    """
    # BUG FIX: 'connection' was never imported at module level (the file
    # imports only DEFAULT_DB_ALIAS from django.db), so the probe below
    # raised NameError, which the old bare 'except:' silently swallowed --
    # this function always returned False on non-SQLite backends.
    from django.db import connection

    if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'] == 'django.db.backends.sqlite3':
        return False

    class StdDevPop(object):
        # Minimal stand-in aggregate used only to probe backend support.
        sql_function = 'STDDEV_POP'

    try:
        connection.ops.check_aggregate_support(StdDevPop())
    except Exception:
        # Narrowed from a bare 'except:': any backend-reported failure
        # (e.g. NotImplementedError) means these tests cannot run here.
        return False
    return True
class AggregationTests(TestCase):
def assertObjectAttrs(self, obj, **kwargs):
for attr, value in kwargs.iteritems():
self.assertEqual(getattr(obj, attr), value)
def test_aggregates_in_where_clause(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Tests that the subselect works and returns results equivalent to a
query with the IDs listed.
Before the corresponding fix for this bug, this test passed in 1.1 and
failed in 1.2-beta (trunk).
"""
qs = Book.objects.values('contact').annotate(Max('id'))
qs = qs.order_by('contact').values_list('id__max', flat=True)
# don't do anything with the queryset (qs) before including it as a
# subquery
books = Book.objects.order_by('id')
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
def test_aggregates_in_where_clause_pre_eval(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Same as the above test, but evaluates the queryset for the subquery
before it's used as a subquery.
Before the corresponding fix for this bug, this test failed in both
1.1 and 1.2-beta (trunk).
"""
qs = Book.objects.values('contact').annotate(Max('id'))
qs = qs.order_by('contact').values_list('id__max', flat=True)
# force the queryset (qs) for the subquery to be evaluated in its
# current state
list(qs)
books = Book.objects.order_by('id')
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'] != 'django.db.backends.oracle':
def test_annotate_with_extra(self):
"""
Regression test for #11916: Extra params + aggregation creates
incorrect SQL.
"""
#oracle doesn't support subqueries in group by clause
shortest_book_sql = """
SELECT name
FROM aggregation_regress_book b
WHERE b.publisher_id = aggregation_regress_publisher.id
ORDER BY b.pages
LIMIT 1
"""
# tests that this query does not raise a DatabaseError due to the full
# subselect being (erroneously) added to the GROUP BY parameters
qs = Publisher.objects.extra(select={
'name_of_shortest_book': shortest_book_sql,
}).annotate(total_books=Count('book'))
# force execution of the query
list(qs)
def test_aggregate(self):
# Ordering requests are ignored
self.assertEqual(
Author.objects.order_by("name").aggregate(Avg("age")),
{"age__avg": Approximate(37.444, places=1)}
)
# Implicit ordering is also ignored
self.assertEqual(
Book.objects.aggregate(Sum("pages")),
{"pages__sum": 3703},
)
# Baseline results
self.assertEqual(
Book.objects.aggregate(Sum('pages'), Avg('pages')),
{'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
)
# Empty values query doesn't affect grouping or results
self.assertEqual(
Book.objects.values().aggregate(Sum('pages'), Avg('pages')),
{'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
)
# Aggregate overrides extra selected column
self.assertEqual(
Book.objects.extra(select={'price_per_page' : 'price / pages'}).aggregate(Sum('pages')),
{'pages__sum': 3703}
)
def test_annotation(self):
# Annotations get combined with extra select clauses
obj = Book.objects.annotate(mean_auth_age=Avg("authors__age")).extra(select={"manufacture_cost": "price * .5"}).get(pk=2)
self.assertObjectAttrs(obj,
contact_id=3,
id=2,
isbn=u'067232959',
mean_auth_age=45.0,
name='Sams Teach Yourself Django in 24 Hours',
pages=528,
price=Decimal("23.09"),
pubdate=datetime.date(2008, 3, 3),
publisher_id=2,
rating=3.0
)
# Different DB backends return different types for the extra select computation
self.assertTrue(obj.manufacture_cost == 11.545 or obj.manufacture_cost == Decimal('11.545'))
# Order of the annotate/extra in the query doesn't matter
obj = Book.objects.extra(select={'manufacture_cost' : 'price * .5'}).annotate(mean_auth_age=Avg('authors__age')).get(pk=2)
self.assertObjectAttrs(obj,
contact_id=3,
id=2,
isbn=u'067232959',
mean_auth_age=45.0,
name=u'Sams Teach Yourself Django in 24 Hours',
pages=528,
price=Decimal("23.09"),
pubdate=datetime.date(2008, 3, 3),
publisher_id=2,
rating=3.0
)
# Different DB backends return different types for the extra select computation
self.assertTrue(obj.manufacture_cost == 11.545 or obj.manufacture_cost == Decimal('11.545'))
# Values queries can be combined with annotate and extra
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'manufacture_cost' : 'price * .5'}).values().get(pk=2)
manufacture_cost = obj['manufacture_cost']
self.assertTrue(manufacture_cost == 11.545 or manufacture_cost == Decimal('11.545'))
del obj['manufacture_cost']
self.assertEqual(obj, {
"contact_id": 3,
"id": 2,
"isbn": u"067232959",
"mean_auth_age": 45.0,
"name": u"Sams Teach Yourself Django in 24 Hours",
"pages": 528,
"price": Decimal("23.09"),
"pubdate": datetime.date(2008, 3, 3),
"publisher_id": 2,
"rating": 3.0,
})
# The order of the (empty) values, annotate and extra clauses doesn't
# matter
obj = Book.objects.values().annotate(mean_auth_age=Avg('authors__age')).extra(select={'manufacture_cost' : 'price * .5'}).get(pk=2)
manufacture_cost = obj['manufacture_cost']
self.assertTrue(manufacture_cost == 11.545 or manufacture_cost == Decimal('11.545'))
del obj['manufacture_cost']
self.assertEqual(obj, {
'contact_id': 3,
'id': 2,
'isbn': u'067232959',
'mean_auth_age': 45.0,
'name': u'Sams Teach Yourself Django in 24 Hours',
'pages': 528,
'price': Decimal("23.09"),
'pubdate': datetime.date(2008, 3, 3),
'publisher_id': 2,
'rating': 3.0
})
# If the annotation precedes the values clause, it won't be included
# unless it is explicitly named
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'price_per_page' : 'price / pages'}).values('name').get(pk=1)
self.assertEqual(obj, {
"name": u'The Definitive Guide to Django: Web Development Done Right',
})
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'price_per_page' : 'price / pages'}).values('name','mean_auth_age').get(pk=1)
self.assertEqual(obj, {
'mean_auth_age': 34.5,
'name': u'The Definitive Guide to Django: Web Development Done Right',
})
# If an annotation isn't included in the values, it can still be used
# in a filter
qs = Book.objects.annotate(n_authors=Count('authors')).values('name').filter(n_authors__gt=2)
self.assertQuerysetEqual(
qs, [
{"name": u'Python Web Development with Django'}
],
lambda b: b,
)
# The annotations are added to values output if values() precedes
# annotate()
obj = Book.objects.values('name').annotate(mean_auth_age=Avg('authors__age')).extra(select={'price_per_page' : 'price / pages'}).get(pk=1)
self.assertEqual(obj, {
'mean_auth_age': 34.5,
'name': u'The Definitive Guide to Django: Web Development Done Right',
})
# Check that all of the objects are getting counted (allow_nulls) and
# that values respects the amount of objects
self.assertEqual(
len(Author.objects.annotate(Avg('friends__age')).values()),
9
)
# Check that consecutive calls to annotate accumulate in the query
qs = Book.objects.values('price').annotate(oldest=Max('authors__age')).order_by('oldest', 'price').annotate(Max('publisher__num_awards'))
self.assertQuerysetEqual(
qs, [
{'price': Decimal("30"), 'oldest': 35, 'publisher__num_awards__max': 3},
{'price': Decimal("29.69"), 'oldest': 37, 'publisher__num_awards__max': 7},
{'price': Decimal("23.09"), 'oldest': 45, 'publisher__num_awards__max': 1},
{'price': Decimal("75"), 'oldest': 57, 'publisher__num_awards__max': 9},
{'price': Decimal("82.8"), 'oldest': 57, 'publisher__num_awards__max': 7}
],
lambda b: b,
)
def test_aggrate_annotation(self):
# Aggregates can be composed over annotations.
# The return type is derived from the composed aggregate
vals = Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('pages'), Max('price'), Sum('num_authors'), Avg('num_authors'))
self.assertEqual(vals, {
'num_authors__sum': 10,
'num_authors__avg': Approximate(1.666, places=2),
'pages__max': 1132,
'price__max': Decimal("82.80")
})
def test_field_error(self):
# Bad field requests in aggregates are caught and reported
self.assertRaises(
FieldError,
lambda: Book.objects.all().aggregate(num_authors=Count('foo'))
)
self.assertRaises(
FieldError,
lambda: Book.objects.all().annotate(num_authors=Count('foo'))
)
self.assertRaises(
FieldError,
lambda: Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('foo'))
)
def test_more(self):
# Old-style count aggregations can be mixed with new-style
self.assertEqual(
Book.objects.annotate(num_authors=Count('authors')).count(),
6
)
# Non-ordinal, non-computed Aggregates over annotations correctly
# inherit the annotation's internal type if the annotation is ordinal
# or computed
vals = Book.objects.annotate(num_authors=Count('authors')).aggregate(Max('num_authors'))
self.assertEqual(
vals,
{'num_authors__max': 3}
)
vals = Publisher.objects.annotate(avg_price=Avg('book__price')).aggregate(Max('avg_price'))
self.assertEqual(
vals,
{'avg_price__max': 75.0}
)
# Aliases are quoted to protected aliases that might be reserved names
vals = Book.objects.aggregate(number=Max('pages'), select=Max('pages'))
self.assertEqual(
vals,
{'number': 1132, 'select': 1132}
)
# Regression for #10064: select_related() plays nice with aggregates
obj = Book.objects.select_related('publisher').annotate(num_authors=Count('authors')).values()[0]
self.assertEqual(obj, {
'contact_id': 8,
'id': 5,
'isbn': u'013790395',
'name': u'Artificial Intelligence: A Modern Approach',
'num_authors': 2,
'pages': 1132,
'price': Decimal("82.8"),
'pubdate': datetime.date(1995, 1, 15),
'publisher_id': 3,
'rating': 4.0,
})
# Regression for #10010: exclude on an aggregate field is correctly
# negated
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors'))),
6
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__gt=2)),
1
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__gt=2)),
5
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__lt=3).exclude(num_authors__lt=2)),
2
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__lt=2).filter(num_authors__lt=3)),
2
)
def test_aggregate_fexpr(self):
# Aggregates can be used with F() expressions
# ... where the F() is pushed into the HAVING clause
qs = Publisher.objects.annotate(num_books=Count('book')).filter(num_books__lt=F('num_awards')/2).order_by('name').values('name','num_books','num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 1, 'name': u'Morgan Kaufmann', 'num_awards': 9},
{'num_books': 2, 'name': u'Prentice Hall', 'num_awards': 7}
],
lambda p: p,
)
qs = Publisher.objects.annotate(num_books=Count('book')).exclude(num_books__lt=F('num_awards')/2).order_by('name').values('name','num_books','num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 2, 'name': u'Apress', 'num_awards': 3},
{'num_books': 0, 'name': u"Jonno's House of Books", 'num_awards': 0},
{'num_books': 1, 'name': u'Sams', 'num_awards': 1}
],
lambda p: p,
)
# ... and where the F() references an aggregate
qs = Publisher.objects.annotate(num_books=Count('book')).filter(num_awards__gt=2*F('num_books')).order_by('name').values('name','num_books','num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 1, 'name': u'Morgan Kaufmann', 'num_awards': 9},
{'num_books': 2, 'name': u'Prentice Hall', 'num_awards': 7}
],
lambda p: p,
)
qs = Publisher.objects.annotate(num_books=Count('book')).exclude(num_books__lt=F('num_awards')/2).order_by('name').values('name','num_books','num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 2, 'name': u'Apress', 'num_awards': 3},
{'num_books': 0, 'name': u"Jonno's House of Books", 'num_awards': 0},
{'num_books': 1, 'name': u'Sams', 'num_awards': 1}
],
lambda p: p,
)
def test_db_col_table(self):
# Tests on fields with non-default table and column names.
qs = Clues.objects.values('EntryID__Entry').annotate(Appearances=Count('EntryID'), Distinct_Clues=Count('Clue', distinct=True))
self.assertQuerysetEqual(qs, [])
qs = Entries.objects.annotate(clue_count=Count('clues__ID'))
self.assertQuerysetEqual(qs, [])
def test_empty(self):
# Regression for #10089: Check handling of empty result sets with
# aggregates
self.assertEqual(
Book.objects.filter(id__in=[]).count(),
0
)
vals = Book.objects.filter(id__in=[]).aggregate(num_authors=Count('authors'), avg_authors=Avg('authors'), max_authors=Max('authors'), max_price=Max('price'), max_rating=Max('rating'))
self.assertEqual(
vals,
{'max_authors': None, 'max_rating': None, 'num_authors': 0, 'avg_authors': None, 'max_price': None}
)
qs = Publisher.objects.filter(pk=5).annotate(num_authors=Count('book__authors'), avg_authors=Avg('book__authors'), max_authors=Max('book__authors'), max_price=Max('book__price'), max_rating=Max('book__rating')).values()
self.assertQuerysetEqual(
qs, [
{'max_authors': None, 'name': u"Jonno's House of Books", 'num_awards': 0, 'max_price': None, 'num_authors': 0, 'max_rating': None, 'id': 5, 'avg_authors': None}
],
lambda p: p
)
def test_more_more(self):
# Regression for #10113 - Fields mentioned in order_by() must be
# included in the GROUP BY. This only becomes a problem when the
# order_by introduces a new join.
self.assertQuerysetEqual(
Book.objects.annotate(num_authors=Count('authors')).order_by('publisher__name', 'name'), [
"Practical Django Projects",
"The Definitive Guide to Django: Web Development Done Right",
"Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp",
"Artificial Intelligence: A Modern Approach",
"Python Web Development with Django",
"Sams Teach Yourself Django in 24 Hours",
],
lambda b: b.name
)
# Regression for #10127 - Empty select_related() works with annotate
qs = Book.objects.filter(rating__lt=4.5).select_related().annotate(Avg('authors__age'))
self.assertQuerysetEqual(
qs, [
(u'Artificial Intelligence: A Modern Approach', 51.5, u'Prentice Hall', u'Peter Norvig'),
(u'Practical Django Projects', 29.0, u'Apress', u'James Bennett'),
(u'Python Web Development with Django', Approximate(30.333, places=2), u'Prentice Hall', u'Jeffrey Forcier'),
(u'Sams Teach Yourself Django in 24 Hours', 45.0, u'Sams', u'Brad Dayley')
],
lambda b: (b.name, b.authors__age__avg, b.publisher.name, b.contact.name)
)
# Regression for #10132 - If the values() clause only mentioned extra
# (select=) columns, those columns are used for grouping
qs = Book.objects.extra(select={'pub':'publisher_id'}).values('pub').annotate(Count('id')).order_by('pub')
self.assertQuerysetEqual(
qs, [
{'pub': 1, 'id__count': 2},
{'pub': 2, 'id__count': 1},
{'pub': 3, 'id__count': 2},
{'pub': 4, 'id__count': 1}
],
lambda b: b
)
qs = Book.objects.extra(select={'pub':'publisher_id', 'foo':'pages'}).values('pub').annotate(Count('id')).order_by('pub')
self.assertQuerysetEqual(
qs, [
{'pub': 1, 'id__count': 2},
{'pub': 2, 'id__count': 1},
{'pub': 3, 'id__count': 2},
{'pub': 4, 'id__count': 1}
],
lambda b: b
)
# Regression for #10182 - Queries with aggregate calls are correctly
# realiased when used in a subquery
ids = Book.objects.filter(pages__gt=100).annotate(n_authors=Count('authors')).filter(n_authors__gt=2).order_by('n_authors')
self.assertQuerysetEqual(
Book.objects.filter(id__in=ids), [
"Python Web Development with Django",
],
lambda b: b.name
)
def test_duplicate_alias(self):
# Regression for #11256 - duplicating a default alias raises ValueError.
self.assertRaises(ValueError, Book.objects.all().annotate, Avg('authors__age'), authors__age__avg=Avg('authors__age'))
def test_field_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with a field name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, age=Avg('friends__age'))
def test_m2m_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with an m2m name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, friends=Count('friends'))
def test_values_queryset_non_conflict(self):
# Regression for #14707 -- If you're using a values query set, some potential conflicts are avoided.
# age is a field on Author, so it shouldn't be allowed as an aggregate.
# But age isn't included in the ValuesQuerySet, so it is.
results = Author.objects.values('name').annotate(age=Count('book_contact_set')).order_by('name')
self.assertEquals(len(results), 9)
self.assertEquals(results[0]['name'], u'Adrian Holovaty')
self.assertEquals(results[0]['age'], 1)
# Same problem, but aggregating over m2m fields
results = Author.objects.values('name').annotate(age=Avg('friends__age')).order_by('name')
self.assertEquals(len(results), 9)
self.assertEquals(results[0]['name'], u'Adrian Holovaty')
self.assertEquals(results[0]['age'], 32.0)
# Same problem, but colliding with an m2m field
results = Author.objects.values('name').annotate(friends=Count('friends')).order_by('name')
self.assertEquals(len(results), 9)
self.assertEquals(results[0]['name'], u'Adrian Holovaty')
self.assertEquals(results[0]['friends'], 2)
def test_reverse_relation_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with a reverse-related name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, book_contact_set=Avg('friends__age'))
def test_pickle(self):
# Regression for #10197 -- Queries with aggregates can be pickled.
# First check that pickling is possible at all. No crash = success
qs = Book.objects.annotate(num_authors=Count('authors'))
pickle.dumps(qs)
# Then check that the round trip works.
query = qs.query.get_compiler(qs.db).as_sql()[0]
qs2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(
qs2.query.get_compiler(qs2.db).as_sql()[0],
query,
)
    def test_more_more_more(self):
        # A grab-bag of historical aggregation regressions, exercised in order.
        # Regression for #10199 - Aggregate calls clone the original query so
        # the original query can still be used
        books = Book.objects.all()
        books.aggregate(Avg("authors__age"))
        self.assertQuerysetEqual(
            books.all(), [
                u'Artificial Intelligence: A Modern Approach',
                u'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
                u'Practical Django Projects',
                u'Python Web Development with Django',
                u'Sams Teach Yourself Django in 24 Hours',
                u'The Definitive Guide to Django: Web Development Done Right'
            ],
            lambda b: b.name
        )
        # Regression for #10248 - Annotations work with DateQuerySets
        qs = Book.objects.annotate(num_authors=Count('authors')).filter(num_authors=2).dates('pubdate', 'day')
        self.assertQuerysetEqual(
            qs, [
                datetime.datetime(1995, 1, 15, 0, 0),
                datetime.datetime(2007, 12, 6, 0, 0)
            ],
            lambda b: b
        )
        # Regression for #10290 - extra selects with parameters can be used for
        # grouping.
        qs = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'sheets' : '(pages + %s) / %s'}, select_params=[1, 2]).order_by('sheets').values('sheets')
        self.assertQuerysetEqual(
            qs, [
                150,
                175,
                224,
                264,
                473,
                566
            ],
            lambda b: int(b["sheets"])
        )
        # Regression for 10425 - annotations don't get in the way of a count()
        # clause
        self.assertEqual(
            Book.objects.values('publisher').annotate(Count('publisher')).count(),
            4
        )
        self.assertEqual(
            Book.objects.annotate(Count('publisher')).values('publisher').count(),
            6
        )
        # Annotating an already-filtered queryset must not disturb either the
        # annotation values or later reuse of the queryset.
        publishers = Publisher.objects.filter(id__in=[1, 2])
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Sams"
            ],
            lambda p: p.name
        )
        publishers = publishers.annotate(n_books=Count("book"))
        self.assertEqual(
            publishers[0].n_books,
            2
        )
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Sams",
            ],
            lambda p: p.name
        )
        books = Book.objects.filter(publisher__in=publishers)
        self.assertQuerysetEqual(
            books, [
                "Practical Django Projects",
                "Sams Teach Yourself Django in 24 Hours",
                "The Definitive Guide to Django: Web Development Done Right",
            ],
            lambda b: b.name
        )
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Sams",
            ],
            lambda p: p.name
        )
        # Regression for 10666 - inherited fields work with annotations and
        # aggregations
        self.assertEqual(
            HardbackBook.objects.aggregate(n_pages=Sum('book_ptr__pages')),
            {'n_pages': 2078}
        )
        self.assertEqual(
            HardbackBook.objects.aggregate(n_pages=Sum('pages')),
            {'n_pages': 2078},
        )
        qs = HardbackBook.objects.annotate(n_authors=Count('book_ptr__authors')).values('name', 'n_authors')
        self.assertQuerysetEqual(
            qs, [
                {'n_authors': 2, 'name': u'Artificial Intelligence: A Modern Approach'},
                {'n_authors': 1, 'name': u'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'}
            ],
            lambda h: h
        )
        # Same query via the local field name instead of the explicit
        # parent-link traversal must give the same result.
        qs = HardbackBook.objects.annotate(n_authors=Count('authors')).values('name', 'n_authors')
        self.assertQuerysetEqual(
            qs, [
                {'n_authors': 2, 'name': u'Artificial Intelligence: A Modern Approach'},
                {'n_authors': 1, 'name': u'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'}
            ],
            lambda h: h,
        )
        # Regression for #10766 - Shouldn't be able to reference an aggregate
        # fields in an an aggregate() call.
        self.assertRaises(
            FieldError,
            lambda: Book.objects.annotate(mean_age=Avg('authors__age')).annotate(Avg('mean_age'))
        )
def test_empty_filter_count(self):
self.assertEqual(
Author.objects.filter(id__in=[]).annotate(Count("friends")).count(),
0
)
def test_empty_filter_aggregate(self):
self.assertEqual(
Author.objects.filter(id__in=[]).annotate(Count("friends")).aggregate(Count("pk")),
{"pk__count": None}
)
def test_annotate_and_join(self):
self.assertEqual(
Author.objects.annotate(c=Count("friends__name")).exclude(friends__name="Joe").count(),
Author.objects.count()
)
    def test_f_expression_annotation(self):
        # An F() expression may reference an annotation alias in a filter.
        # Books with less than 200 pages per author.
        qs = Book.objects.values("name").annotate(
            n_authors=Count("authors")
        ).filter(
            pages__lt=F("n_authors") * 200
        ).values_list("pk")
        self.assertQuerysetEqual(
            Book.objects.filter(pk__in=qs), [
                "Python Web Development with Django"
            ],
            attrgetter("name")
        )
def test_values_annotate_values(self):
qs = Book.objects.values("name").annotate(
n_authors=Count("authors")
).values_list("pk", flat=True)
self.assertEqual(list(qs), list(Book.objects.values_list("pk", flat=True)))
    def test_having_group_by(self):
        # Test that when a field occurs on the LHS of a HAVING clause that it
        # appears correctly in the GROUP BY clause.  Filtering on the
        # annotation forces HAVING; "pages" on its LHS must then be grouped.
        qs = Book.objects.values_list("name").annotate(
            n_authors=Count("authors")
        ).filter(
            pages__gt=F("n_authors")
        ).values_list("name", flat=True)
        # Results should be the same, all Books have more pages than authors
        self.assertEqual(
            list(qs), list(Book.objects.values_list("name", flat=True))
        )
    def test_annotation_disjunction(self):
        # OR-ing a filter on an annotation with a filter on a plain field.
        qs = Book.objects.annotate(n_authors=Count("authors")).filter(
            Q(n_authors=2) | Q(name="Python Web Development with Django")
        )
        self.assertQuerysetEqual(
            qs, [
                "Artificial Intelligence: A Modern Approach",
                "Python Web Development with Django",
                "The Definitive Guide to Django: Web Development Done Right",
            ],
            attrgetter("name")
        )
        # Nested disjunction/conjunction mixing field and annotation lookups.
        qs = Book.objects.annotate(n_authors=Count("authors")).filter(
            Q(name="The Definitive Guide to Django: Web Development Done Right") | (Q(name="Artificial Intelligence: A Modern Approach") & Q(n_authors=3))
        )
        self.assertQuerysetEqual(
            qs, [
                "The Definitive Guide to Django: Web Development Done Right",
            ],
            attrgetter("name")
        )
        # Disjunctions involving NULL aggregate results (presumably publishers
        # with no books, whose rating_sum is NULL -- verify against fixtures).
        qs = Publisher.objects.annotate(
            rating_sum=Sum("book__rating"),
            book_count=Count("book")
        ).filter(
            Q(rating_sum__gt=5.5) | Q(rating_sum__isnull=True)
        ).order_by('pk')
        self.assertQuerysetEqual(
            qs, [
                "Apress",
                "Prentice Hall",
                "Jonno's House of Books",
            ],
            attrgetter("name")
        )
        # Same idea, using F() against another annotation and =None for NULL.
        qs = Publisher.objects.annotate(
            rating_sum=Sum("book__rating"),
            book_count=Count("book")
        ).filter(
            Q(pk__lt=F("book_count")) | Q(rating_sum=None)
        ).order_by("pk")
        self.assertQuerysetEqual(
            qs, [
                "Apress",
                "Jonno's House of Books",
            ],
            attrgetter("name")
        )
    def test_quoting_aggregate_order_by(self):
        # A mixed-case annotation alias ('authorCount') used in order_by();
        # the test name indicates this exercises identifier quoting in the
        # generated ORDER BY clause.
        qs = Book.objects.filter(
            name="Python Web Development with Django"
        ).annotate(
            authorCount=Count("authors")
        ).order_by("authorCount")
        self.assertQuerysetEqual(
            qs, [
                ("Python Web Development with Django", 3),
            ],
            lambda b: (b.name, b.authorCount)
        )
    # test_stddev is only defined when run_stddev_tests() allows it --
    # presumably a backend-capability check defined elsewhere in this file;
    # confirm before relying on it.
    if run_stddev_tests():
        def test_stddev(self):
            self.assertEqual(
                Book.objects.aggregate(StdDev('pages')),
                {'pages__stddev': Approximate(311.46, 1)}
            )
            self.assertEqual(
                Book.objects.aggregate(StdDev('rating')),
                {'rating__stddev': Approximate(0.60, 1)}
            )
            self.assertEqual(
                Book.objects.aggregate(StdDev('price')),
                {'price__stddev': Approximate(24.16, 2)}
            )
            # sample=True selects the sample (rather than population) variant.
            self.assertEqual(
                Book.objects.aggregate(StdDev('pages', sample=True)),
                {'pages__stddev': Approximate(341.19, 2)}
            )
            self.assertEqual(
                Book.objects.aggregate(StdDev('rating', sample=True)),
                {'rating__stddev': Approximate(0.66, 2)}
            )
            self.assertEqual(
                Book.objects.aggregate(StdDev('price', sample=True)),
                {'price__stddev': Approximate(26.46, 1)}
            )
            self.assertEqual(
                Book.objects.aggregate(Variance('pages')),
                {'pages__variance': Approximate(97010.80, 1)}
            )
            self.assertEqual(
                Book.objects.aggregate(Variance('rating')),
                {'rating__variance': Approximate(0.36, 1)}
            )
            self.assertEqual(
                Book.objects.aggregate(Variance('price')),
                {'price__variance': Approximate(583.77, 1)}
            )
            self.assertEqual(
                Book.objects.aggregate(Variance('pages', sample=True)),
                {'pages__variance': Approximate(116412.96, 1)}
            )
            self.assertEqual(
                Book.objects.aggregate(Variance('rating', sample=True)),
                {'rating__variance': Approximate(0.44, 2)}
            )
            self.assertEqual(
                Book.objects.aggregate(Variance('price', sample=True)),
                {'price__variance': Approximate(700.53, 2)}
            )
| gpl-3.0 |
unreal666/outwiker | plugins/markdown/markdown/markdown_plugin_libs/pygments/lexers/slash.py | 4 | 8508 | # -*- coding: utf-8 -*-
"""
pygments.lexers.slash
~~~~~~~~~~~~~~~~~~~~~
Lexer for the `Slash <https://github.com/arturadib/Slash-A>`_ programming
language.
:copyright: Copyright 2012 by GitHub, Inc
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import ExtendedRegexLexer, bygroups, DelegatingLexer
from pygments.token import Name, Number, String, Comment, Punctuation, \
Other, Keyword, Operator, Whitespace
__all__ = ['SlashLexer']
class SlashLanguageLexer(ExtendedRegexLexer):
    """Token rules for the Slash language proper (the <% ... %> regions)."""
    # Lookahead asserting the next character cannot continue an identifier;
    # appended to keyword patterns so e.g. 'ifx' is not lexed as 'if'.
    _nkw = r'(?=[^a-zA-Z_0-9])'
    def move_state(new_state):
        # Helper used only while building the tokens table below:
        # pop the current state, then push new_state.
        return ("#pop", new_state)
    def right_angle_bracket(lexer, match, ctx):
        # '}' is always emitted as String.Interpol; when it closes a #{...}
        # interpolation the pending "string" state is resumed by popping.
        if len(ctx.stack) > 1 and ctx.stack[-2] == "string":
            ctx.stack.pop()
        yield match.start(), String.Interpol, "}"
        ctx.pos = match.end()
        pass
    tokens = {
        "root": [
            (r"<%=", Comment.Preproc, move_state("slash")),
            (r"<%!!", Comment.Preproc, move_state("slash")),
            (r"<%#.*?%>", Comment.Multiline),
            (r"<%", Comment.Preproc, move_state("slash")),
            (r".|\n", Other),
        ],
        "string": [
            (r"\\", String.Escape, move_state("string_e")),
            (r"\"", String, move_state("slash")),
            (r"#\{", String.Interpol, "slash"),
            (r'.|\n', String),
        ],
        "string_e": [
            (r'n', String.Escape, move_state("string")),
            (r't', String.Escape, move_state("string")),
            (r'r', String.Escape, move_state("string")),
            (r'e', String.Escape, move_state("string")),
            (r'x[a-fA-F0-9]{2}', String.Escape, move_state("string")),
            (r'.', String.Escape, move_state("string")),
        ],
        "regexp": [
            (r'}[a-z]*', String.Regex, move_state("slash")),
            (r'\\(.|\n)', String.Regex),
            (r'{', String.Regex, "regexp_r"),
            (r'.|\n', String.Regex),
        ],
        "regexp_r": [
            (r'}[a-z]*', String.Regex, "#pop"),
            (r'\\(.|\n)', String.Regex),
            (r'{', String.Regex, "regexp_r"),
        ],
        "slash": [
            (r"%>", Comment.Preproc, move_state("root")),
            (r"\"", String, move_state("string")),
            (r"'[a-zA-Z0-9_]+", String),
            (r'%r{', String.Regex, move_state("regexp")),
            (r'/\*.*?\*/', Comment.Multiline),
            (r"(#|//).*?\n", Comment.Single),
            (r'-?[0-9]+e[+-]?[0-9]+', Number.Float),
            (r'-?[0-9]+\.[0-9]+(e[+-]?[0-9]+)?', Number.Float),
            (r'-?[0-9]+', Number.Integer),
            (r'nil'+_nkw, Name.Builtin),
            (r'true'+_nkw, Name.Builtin),
            (r'false'+_nkw, Name.Builtin),
            (r'self'+_nkw, Name.Builtin),
            (r'(class)(\s+)([A-Z][a-zA-Z0-9_\']*)',
                bygroups(Keyword, Whitespace, Name.Class)),
            (r'class'+_nkw, Keyword),
            (r'extends'+_nkw, Keyword),
            (r'(def)(\s+)(self)(\s*)(\.)(\s*)([a-z_][a-zA-Z0-9_\']*=?|<<|>>|==|<=>|<=|<|>=|>|\+|-(self)?|~(self)?|\*|/|%|^|&&|&|\||\[\]=?)',
                bygroups(Keyword, Whitespace, Name.Builtin, Whitespace, Punctuation, Whitespace, Name.Function)),
            (r'(def)(\s+)([a-z_][a-zA-Z0-9_\']*=?|<<|>>|==|<=>|<=|<|>=|>|\+|-(self)?|~(self)?|\*|/|%|^|&&|&|\||\[\]=?)',
                bygroups(Keyword, Whitespace, Name.Function)),
            (r'def'+_nkw, Keyword),
            (r'if'+_nkw, Keyword),
            (r'elsif'+_nkw, Keyword),
            (r'else'+_nkw, Keyword),
            (r'unless'+_nkw, Keyword),
            (r'for'+_nkw, Keyword),
            (r'in'+_nkw, Keyword),
            (r'while'+_nkw, Keyword),
            (r'until'+_nkw, Keyword),
            (r'and'+_nkw, Keyword),
            (r'or'+_nkw, Keyword),
            (r'not'+_nkw, Keyword),
            (r'lambda'+_nkw, Keyword),
            (r'try'+_nkw, Keyword),
            (r'catch'+_nkw, Keyword),
            (r'return'+_nkw, Keyword),
            (r'next'+_nkw, Keyword),
            (r'last'+_nkw, Keyword),
            (r'throw'+_nkw, Keyword),
            (r'use'+_nkw, Keyword),
            (r'switch'+_nkw, Keyword),
            (r'\\', Keyword),
            (r'λ', Keyword),
            (r'__FILE__'+_nkw, Name.Builtin.Pseudo),
            (r'__LINE__'+_nkw, Name.Builtin.Pseudo),
            (r'[A-Z][a-zA-Z0-9_\']*'+_nkw, Name.Constant),
            (r'[a-z_][a-zA-Z0-9_\']*'+_nkw, Name),
            (r'@[a-z_][a-zA-Z0-9_\']*'+_nkw, Name.Variable.Instance),
            (r'@@[a-z_][a-zA-Z0-9_\']*'+_nkw, Name.Variable.Class),
            (r'\(', Punctuation),
            (r'\)', Punctuation),
            (r'\[', Punctuation),
            (r'\]', Punctuation),
            (r'\{', Punctuation),
            (r'\}', right_angle_bracket),
            (r';', Punctuation),
            (r',', Punctuation),
            (r'<<=', Operator),
            (r'>>=', Operator),
            (r'<<', Operator),
            (r'>>', Operator),
            (r'==', Operator),
            (r'!=', Operator),
            (r'=>', Operator),
            (r'=', Operator),
            (r'<=>', Operator),
            (r'<=', Operator),
            (r'>=', Operator),
            (r'<', Operator),
            (r'>', Operator),
            (r'\+\+', Operator),
            (r'\+=', Operator),
            (r'-=', Operator),
            (r'\*\*=', Operator),
            (r'\*=', Operator),
            (r'\*\*', Operator),
            (r'\*', Operator),
            (r'/=', Operator),
            (r'\+', Operator),
            (r'-', Operator),
            (r'/', Operator),
            (r'%=', Operator),
            (r'%', Operator),
            (r'^=', Operator),  # NOTE(review): '^' is a regex anchor, so this likely never matches a literal '^=' -- upstream quirk, left as-is.
            (r'&&=', Operator),
            (r'&=', Operator),
            (r'&&', Operator),
            (r'&', Operator),
            (r'\|\|=', Operator),
            (r'\|=', Operator),
            (r'\|\|', Operator),
            (r'\|', Operator),
            (r'!', Operator),
            (r'\.\.\.', Operator),
            (r'\.\.', Operator),
            (r'\.', Operator),
            (r'::', Operator),
            (r':', Operator),
            (r'(\s|\n)+', Whitespace),
            (r'[a-z_][a-zA-Z0-9_\']*', Name.Variable),
        ],
    }
class SlashLexer(DelegatingLexer):
    """
    Lexer for the Slash programming language.

    Non-Slash portions of the input are delegated to an HTML lexer while
    SlashLanguageLexer handles the embedded Slash regions.

    .. versionadded:: 2.4
    """
    name = 'Slash'
    aliases = ['slash']
    filenames = ['*.sl']
    def __init__(self, **options):
        # Local import: HtmlLexer is only needed once a SlashLexer is built.
        from pygments.lexers.web import HtmlLexer
        super(SlashLexer, self).__init__(HtmlLexer, SlashLanguageLexer, **options)
| gpl-3.0 |
ossdemura/django-miniblog | src/Lib/site-packages/pip/_vendor/colorama/ansitowin32.py | 450 | 9668 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import re
import sys
import os
from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style
from .winterm import WinTerm, WinColor, WinStyle
from .win32 import windll, winapi_test
# WinTerm needs the win32 bindings; `windll` is None when they failed to load,
# in which case no Win32 console driver is created.
winterm = None
if windll is not None:
    winterm = WinTerm()
def is_stream_closed(stream):
    '''A stream counts as closed when it exposes no 'closed' attribute at
    all, or when that attribute reports a truthy value.'''
    if hasattr(stream, 'closed'):
        return stream.closed
    return True
def is_a_tty(stream):
    '''True only for streams that expose isatty() and report being a tty.'''
    if hasattr(stream, 'isatty'):
        return stream.isatty()
    return False
class StreamWrapper(object):
    '''
    Transparent proxy around a stream (such as sys.stdout).

    Every attribute lookup is forwarded to the wrapped stream, except
    'write()', which is routed through the converter object so ANSI
    processing can happen before the real write.
    '''
    def __init__(self, wrapped, converter):
        # Name-mangled attributes keep our bookkeeping from colliding with
        # attribute names the wrapped stream might itself expose.
        self.__wrapped = wrapped
        self.__convertor = converter
    def write(self, text):
        # Route through the converter instead of writing directly.
        self.__convertor.write(text)
    def __getattr__(self, name):
        # Only invoked for attributes not found on the proxy itself;
        # everything else is served straight from the wrapped stream.
        return getattr(self.__wrapped, name)
class AnsiToWin32(object):
    '''
    Implements a 'write()' method which, on Windows, will strip ANSI character
    sequences from the text, and if outputting to a tty, will convert them into
    win32 function calls.
    '''
    # FIX: '[' , ']' and 'd' are escaped with doubled backslashes so the regex
    # metacharacters survive inside these non-raw strings (raw strings cannot
    # be used because \001, \033 and \002 must be interpreted by Python);
    # the previous single-backslash forms were invalid string escape
    # sequences, a DeprecationWarning on Python >= 3.6.
    ANSI_CSI_RE = re.compile('\001?\033\\[((?:\\d|;)*)([a-zA-Z])\002?')   # Control Sequence Introducer
    ANSI_OSC_RE = re.compile('\001?\033\\]((?:.|;)*?)(\x07)\002?')        # Operating System Command
    def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
        # The wrapped stream (normally sys.stdout or sys.stderr)
        self.wrapped = wrapped
        # should we reset colors to defaults after every .write()
        self.autoreset = autoreset
        # create the proxy wrapping our output stream
        self.stream = StreamWrapper(wrapped, self)
        on_windows = os.name == 'nt'
        # We test if the WinAPI works, because even if we are on Windows
        # we may be using a terminal that doesn't support the WinAPI
        # (e.g. Cygwin Terminal). In this case it's up to the terminal
        # to support the ANSI codes.
        conversion_supported = on_windows and winapi_test()
        # should we strip ANSI sequences from our output?
        if strip is None:
            strip = conversion_supported or (not is_stream_closed(wrapped) and not is_a_tty(wrapped))
        self.strip = strip
        # should we should convert ANSI sequences into win32 calls?
        if convert is None:
            convert = conversion_supported and not is_stream_closed(wrapped) and is_a_tty(wrapped)
        self.convert = convert
        # dict of ansi codes to win32 functions and parameters
        self.win32_calls = self.get_win32_calls()
        # are we wrapping stderr?
        self.on_stderr = self.wrapped is sys.stderr
    def should_wrap(self):
        '''
        True if this class is actually needed. If false, then the output
        stream will not be affected, nor will win32 calls be issued, so
        wrapping stdout is not actually required. This will generally be
        False on non-Windows platforms, unless optional functionality like
        autoreset has been requested using kwargs to init()
        '''
        return self.convert or self.strip or self.autoreset
    def get_win32_calls(self):
        '''Build the ANSI-code -> (winterm function, *args) dispatch table.'''
        if self.convert and winterm:
            return {
                AnsiStyle.RESET_ALL: (winterm.reset_all, ),
                AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
                AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
                AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
                AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
                AnsiFore.RED: (winterm.fore, WinColor.RED),
                AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
                AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
                AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
                AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
                AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
                AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
                AnsiFore.RESET: (winterm.fore, ),
                AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True),
                AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True),
                AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True),
                AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True),
                AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True),
                AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True),
                AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True),
                AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True),
                AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
                AnsiBack.RED: (winterm.back, WinColor.RED),
                AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
                AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
                AnsiBack.BLUE: (winterm.back, WinColor.BLUE),
                AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
                AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
                AnsiBack.WHITE: (winterm.back, WinColor.GREY),
                AnsiBack.RESET: (winterm.back, ),
                AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True),
                AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True),
                AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True),
                AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True),
                AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True),
                AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True),
                AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True),
                AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True),
            }
        return dict()
    def write(self, text):
        '''Write text to the wrapped stream, stripping/converting ANSI first
        when configured to do so, then optionally resetting colors.'''
        if self.strip or self.convert:
            self.write_and_convert(text)
        else:
            self.wrapped.write(text)
            self.wrapped.flush()
        if self.autoreset:
            self.reset_all()
    def reset_all(self):
        '''Restore default console colors/styles (via win32 or ANSI).'''
        if self.convert:
            self.call_win32('m', (0,))
        elif not self.strip and not is_stream_closed(self.wrapped):
            self.wrapped.write(Style.RESET_ALL)
    def write_and_convert(self, text):
        '''
        Write the given text to our wrapped stream, stripping any ANSI
        sequences from the text, and optionally converting them into win32
        calls.
        '''
        cursor = 0
        text = self.convert_osc(text)
        for match in self.ANSI_CSI_RE.finditer(text):
            start, end = match.span()
            self.write_plain_text(text, cursor, start)
            self.convert_ansi(*match.groups())
            cursor = end
        self.write_plain_text(text, cursor, len(text))
    def write_plain_text(self, text, start, end):
        '''Write text[start:end] straight through to the wrapped stream.'''
        if start < end:
            self.wrapped.write(text[start:end])
            self.wrapped.flush()
    def convert_ansi(self, paramstring, command):
        '''Translate one CSI sequence into a win32 call (when converting).'''
        if self.convert:
            params = self.extract_params(command, paramstring)
            self.call_win32(command, params)
    def extract_params(self, command, paramstring):
        '''Parse the numeric parameters of a CSI sequence, applying the
        per-command defaults mandated for empty parameter lists.'''
        if command in 'Hf':
            params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';'))
            while len(params) < 2:
                # defaults:
                params = params + (1,)
        else:
            params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0)
            if len(params) == 0:
                # defaults:
                if command in 'JKm':
                    params = (0,)
                elif command in 'ABCD':
                    params = (1,)
        return params
    def call_win32(self, command, params):
        '''Dispatch a parsed CSI command to the appropriate winterm call.'''
        if command == 'm':
            for param in params:
                if param in self.win32_calls:
                    func_args = self.win32_calls[param]
                    func = func_args[0]
                    args = func_args[1:]
                    kwargs = dict(on_stderr=self.on_stderr)
                    func(*args, **kwargs)
        elif command in 'J':
            winterm.erase_screen(params[0], on_stderr=self.on_stderr)
        elif command in 'K':
            winterm.erase_line(params[0], on_stderr=self.on_stderr)
        elif command in 'Hf':     # cursor position - absolute
            winterm.set_cursor_position(params, on_stderr=self.on_stderr)
        elif command in 'ABCD':   # cursor position - relative
            n = params[0]
            # A - up, B - down, C - forward, D - back
            x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command]
            winterm.cursor_adjust(x, y, on_stderr=self.on_stderr)
    def convert_osc(self, text):
        '''Strip OSC sequences from text, honoring title-change commands.'''
        for match in self.ANSI_OSC_RE.finditer(text):
            start, end = match.span()
            text = text[:start] + text[end:]
            paramstring, command = match.groups()
            if command in '\x07':   # \x07 = BEL
                params = paramstring.split(";")
                # 0 - change title and icon (we will only change title)
                # 1 - change icon (we don't support this)
                # 2 - change title
                if params[0] in '02':
                    winterm.set_title(params[1])
        return text
| mit |
dstaple/z3test | old-regressions/python/tst5.py | 3 | 1214 |
# Copyright (c) 2015 Microsoft Corporation
from z3 import *
# Smoke test of the Z3 Python API: function declarations, applications,
# argument access, expression predicates (is_expr/is_app/is_const/is_var),
# and cross-context translation.  Output is compared against a golden log
# by the regression harness, so the print order matters.
f = Function('f', IntSort(), RealSort(), IntSort())
try:
    # domain(3) is out of range for a 2-argument function and must raise.
    print(f.domain(3))
except Z3Exception:
    print("Failed")
x = Int('x')
print(f(1, 1))
print(f(1, 1).sort())
print(f(x, 1).num_args())
print(f(x+1, 1).children())
print(f(x+1, 1).arg(0))
print(f(x+1, 1).arg(0).eq(x+1))
print(f(x+1, x).decl()(2, x+1))
print(is_expr(1))
print(is_expr(IntVal(1)))
print(is_expr(x + 1))
print(is_app(1))
print(is_app(IntVal(1)))
print(is_app(x + 1))
print(is_expr(ForAll(x, x > 0)))
print(is_app(ForAll(x, x > 0)))
print(is_const(IntVal(1)))
print(is_const(x))
print(is_const(x + 1))
print(is_const(ForAll(x, x > 0)))
# Inside the quantifier body the bound occurrence of x is a de Bruijn
# variable, hence is_var is True there but False for the free constant x.
print(ForAll(x, x > 0).body().arg(0))
print(is_expr(ForAll(x, x > 0).body().arg(0)))
print(is_app(ForAll(x, x > 0).body().arg(0)))
print(is_const(ForAll(x, x > 0).body().arg(0)))
print(is_var(ForAll(x, x > 0).body().arg(0)))
print(is_var(x))
print(If(True, x, x+1))
ctx = Context()
print(If(True, x.translate(ctx), (x+1).translate(ctx)) == If(True, 1, 1, ctx))
print(Distinct(x, x+1, x+2))
try:
    # Distinct over plain Python ints (no Z3 context) is expected to raise.
    print(Distinct(1, 2, 3))
except Z3Exception:
    print("failed")
print(And(Distinct(x.translate(ctx), 1),
          x.translate(ctx) > 0))
| mit |
rossburton/yocto-autobuilder | lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/test/test_threads.py | 5 | 13048 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test methods in twisted.internet.threads and reactor thread APIs.
"""
import sys, os, time
from twisted.trial import unittest
from twisted.internet import reactor, defer, interfaces, threads, protocol, error
from twisted.python import failure, threadable, log, threadpool
class ReactorThreadsTestCase(unittest.TestCase):
    """
    Tests for the reactor threading API.

    NOTE: this is Python 2 source (xrange, comma except-clause syntax).
    """
    def test_suggestThreadPoolSize(self):
        """
        Try to change maximum number of threads.
        """
        reactor.suggestThreadPoolSize(34)
        self.assertEqual(reactor.threadpool.max, 34)
        reactor.suggestThreadPoolSize(4)
        self.assertEqual(reactor.threadpool.max, 4)
    def _waitForThread(self):
        """
        The reactor's threadpool is only available when the reactor is running,
        so to have a sane behavior during the tests we make a dummy
        L{threads.deferToThread} call.
        """
        return threads.deferToThread(time.sleep, 0)
    def test_callInThread(self):
        """
        Test callInThread functionality: set a C{threading.Event}, and check
        that it's not in the main thread.
        """
        def cb(ign):
            waiter = threading.Event()
            result = []
            def threadedFunc():
                result.append(threadable.isInIOThread())
                waiter.set()
            reactor.callInThread(threadedFunc)
            waiter.wait(120)
            if not waiter.isSet():
                self.fail("Timed out waiting for event.")
            else:
                self.assertEqual(result, [False])
        return self._waitForThread().addCallback(cb)
    def test_callFromThread(self):
        """
        Test callFromThread functionality: from the main thread, and from
        another thread.
        """
        def cb(ign):
            firedByReactorThread = defer.Deferred()
            firedByOtherThread = defer.Deferred()
            def threadedFunc():
                reactor.callFromThread(firedByOtherThread.callback, None)
            reactor.callInThread(threadedFunc)
            reactor.callFromThread(firedByReactorThread.callback, None)
            return defer.DeferredList(
                [firedByReactorThread, firedByOtherThread],
                fireOnOneErrback=True)
        return self._waitForThread().addCallback(cb)
    def test_wakerOverflow(self):
        """
        Try to make an overflow on the reactor waker using callFromThread.
        """
        def cb(ign):
            self.failure = None
            waiter = threading.Event()
            def threadedFunction():
                # Hopefully a hundred thousand queued calls is enough to
                # trigger the error condition
                for i in xrange(100000):
                    try:
                        reactor.callFromThread(lambda: None)
                    except:
                        # Bare except on purpose: whatever the overflow
                        # raises is captured as a Failure for the assertion.
                        self.failure = failure.Failure()
                        break
                waiter.set()
            reactor.callInThread(threadedFunction)
            waiter.wait(120)
            if not waiter.isSet():
                self.fail("Timed out waiting for event")
            if self.failure is not None:
                return defer.fail(self.failure)
        return self._waitForThread().addCallback(cb)
    def _testBlockingCallFromThread(self, reactorFunc):
        """
        Utility method to test L{threads.blockingCallFromThread}.

        Fires with a C{(results, errors)} pair collected from the worker
        thread, so callers can assert on either outcome.
        """
        waiter = threading.Event()
        results = []
        errors = []
        def cb1(ign):
            def threadedFunc():
                try:
                    r = threads.blockingCallFromThread(reactor, reactorFunc)
                except Exception, e:
                    errors.append(e)
                else:
                    results.append(r)
                waiter.set()
            reactor.callInThread(threadedFunc)
            return threads.deferToThread(waiter.wait, self.getTimeout())
        def cb2(ign):
            if not waiter.isSet():
                self.fail("Timed out waiting for event")
            return results, errors
        return self._waitForThread().addCallback(cb1).addBoth(cb2)
    def test_blockingCallFromThread(self):
        """
        Test blockingCallFromThread facility: create a thread, call a function
        in the reactor using L{threads.blockingCallFromThread}, and verify the
        result returned.
        """
        def reactorFunc():
            return defer.succeed("foo")
        def cb(res):
            self.assertEqual(res[0][0], "foo")
        return self._testBlockingCallFromThread(reactorFunc).addCallback(cb)
    def test_asyncBlockingCallFromThread(self):
        """
        Test blockingCallFromThread as above, but be sure the resulting
        Deferred is not already fired.
        """
        def reactorFunc():
            d = defer.Deferred()
            reactor.callLater(0.1, d.callback, "egg")
            return d
        def cb(res):
            self.assertEqual(res[0][0], "egg")
        return self._testBlockingCallFromThread(reactorFunc).addCallback(cb)
    def test_errorBlockingCallFromThread(self):
        """
        Test error report for blockingCallFromThread.
        """
        def reactorFunc():
            return defer.fail(RuntimeError("bar"))
        def cb(res):
            self.assert_(isinstance(res[1][0], RuntimeError))
            self.assertEqual(res[1][0].args[0], "bar")
        return self._testBlockingCallFromThread(reactorFunc).addCallback(cb)
    def test_asyncErrorBlockingCallFromThread(self):
        """
        Test error report for blockingCallFromThread as above, but be sure the
        resulting Deferred is not already fired.
        """
        def reactorFunc():
            d = defer.Deferred()
            reactor.callLater(0.1, d.errback, RuntimeError("spam"))
            return d
        def cb(res):
            self.assert_(isinstance(res[1][0], RuntimeError))
            self.assertEqual(res[1][0].args[0], "spam")
        return self._testBlockingCallFromThread(reactorFunc).addCallback(cb)
class Counter:
    """A deliberately non-thread-safe counter used to demonstrate races."""
    # Current count; may lose updates if add() races with itself.
    index = 0
    # Set to 1 if a race is actually detected inside add().
    problem = 0
    def add(self):
        """Increment ``index`` by one; intentionally NOT thread-safe."""
        # FIX: renamed the local from 'next' to avoid shadowing the builtin.
        new_index = self.index + 1
        # another thread could jump in here and increment self.index on us
        if new_index != self.index + 1:
            self.problem = 1
            raise ValueError
        # or here, same issue but we wouldn't catch it. We'd overwrite
        # their results, and the index will have lost a count. If
        # several threads get in here, we will actually make the count
        # go backwards when we overwrite it.
        self.index = new_index
class DeferredResultTestCase(unittest.TestCase):
    """
    Test twisted.internet.threads.
    """
    def setUp(self):
        reactor.suggestThreadPoolSize(8)
    def tearDown(self):
        # Shrink the pool back down so later tests are unaffected.
        reactor.suggestThreadPoolSize(0)
    def testCallMultiple(self):
        # callMultipleInThread runs all calls sequentially in one thread, so
        # the appends land in order; the final call fires the deferred.
        L = []
        N = 10
        d = defer.Deferred()
        def finished():
            self.assertEqual(L, range(N))
            d.callback(None)
        threads.callMultipleInThread([
            (L.append, (i,), {}) for i in xrange(N)
            ] + [(reactor.callFromThread, (finished,), {})])
        return d
    def test_deferredResult(self):
        """
        L{threads.deferToThread} executes the function passed, and correctly
        handles the positional and keyword arguments given.
        """
        d = threads.deferToThread(lambda x, y=5: x + y, 3, y=4)
        d.addCallback(self.assertEqual, 7)
        return d
    def test_deferredFailure(self):
        """
        Check that L{threads.deferToThread} return a failure object
        with an appropriate exception instance when the called
        function raises an exception.
        """
        class NewError(Exception):
            pass
        def raiseError():
            raise NewError()
        d = threads.deferToThread(raiseError)
        return self.assertFailure(d, NewError)
    def test_deferredFailureAfterSuccess(self):
        """
        Check that a successfull L{threads.deferToThread} followed by a one
        that raises an exception correctly result as a failure.
        """
        # set up a condition that causes cReactor to hang. These conditions
        # can also be set by other tests when the full test suite is run in
        # alphabetical order (test_flow.FlowTest.testThreaded followed by
        # test_internet.ReactorCoreTestCase.testStop, to be precise). By
        # setting them up explicitly here, we can reproduce the hang in a
        # single precise test case instead of depending upon side effects of
        # other tests.
        #
        # alas, this test appears to flunk the default reactor too
        d = threads.deferToThread(lambda: None)
        d.addCallback(lambda ign: threads.deferToThread(lambda: 1//0))
        return self.assertFailure(d, ZeroDivisionError)
class DeferToThreadPoolTestCase(unittest.TestCase):
    """
    Test L{twisted.internet.threads.deferToThreadPool}.
    """
    def setUp(self):
        # A private pool (0..8 threads), independent of the reactor's pool.
        self.tp = threadpool.ThreadPool(0, 8)
        self.tp.start()
    def tearDown(self):
        self.tp.stop()
    def test_deferredResult(self):
        """
        L{threads.deferToThreadPool} executes the function passed, and
        correctly handles the positional and keyword arguments given.
        """
        d = threads.deferToThreadPool(reactor, self.tp,
                                      lambda x, y=5: x + y, 3, y=4)
        d.addCallback(self.assertEqual, 7)
        return d
    def test_deferredFailure(self):
        """
        Check that L{threads.deferToThreadPool} return a failure object with an
        appropriate exception instance when the called function raises an
        exception.
        """
        class NewError(Exception):
            pass
        def raiseError():
            raise NewError()
        d = threads.deferToThreadPool(reactor, self.tp, raiseError)
        return self.assertFailure(d, NewError)
_callBeforeStartupProgram = """
import time
import %(reactor)s
%(reactor)s.install()
from twisted.internet import reactor
def threadedCall():
print 'threaded call'
reactor.callInThread(threadedCall)
# Spin very briefly to try to give the thread a chance to run, if it
# is going to. Is there a better way to achieve this behavior?
for i in xrange(100):
time.sleep(0.0)
"""
class ThreadStartupProcessProtocol(protocol.ProcessProtocol):
    """
    Process protocol that buffers the child's stdout and stderr and fires
    the supplied Deferred with C{(out, err, reason)} when the process ends.
    """
    def __init__(self, finished):
        self.finished = finished
        self.out = []
        self.err = []
    def outReceived(self, data):
        self.out.append(data)
    def errReceived(self, data):
        self.err.append(data)
    def processEnded(self, reason):
        self.finished.callback((self.out, self.err, reason))
class StartupBehaviorTestCase(unittest.TestCase):
    """
    Test cases for the behavior of the reactor threadpool near startup
    boundary conditions.
    In particular, this asserts that no threaded calls are attempted
    until the reactor starts up, that calls attempted before it starts
    are in fact executed once it has started, and that in both cases,
    the reactor properly cleans itself up (which is tested for
    somewhat implicitly, by requiring a child process be able to exit,
    something it cannot do unless the threadpool has been properly
    torn down).
    """
    def testCallBeforeStartupUnexecuted(self):
        # Write the (Python 2) child program to a temp file, substituting in
        # the reactor module currently under test.
        progname = self.mktemp()
        progfile = file(progname, 'w')
        progfile.write(_callBeforeStartupProgram % {'reactor': reactor.__module__})
        progfile.close()
        # Python 2 tuple-parameter syntax: unpacks (out, err, reason).
        def programFinished((out, err, reason)):
            if reason.check(error.ProcessTerminated):
                self.fail("Process did not exit cleanly (out: %s err: %s)" % (out, err))
            if err:
                log.msg("Unexpected output on standard error: %s" % (err,))
            self.failIf(out, "Expected no output, instead received:\n%s" % (out,))
        def programTimeout(err):
            err.trap(error.TimeoutError)
            proto.signalProcess('KILL')
            return err
        # Run the child with the parent's import path so it finds the same
        # Twisted installation.
        env = os.environ.copy()
        env['PYTHONPATH'] = os.pathsep.join(sys.path)
        d = defer.Deferred().addCallbacks(programFinished, programTimeout)
        proto = ThreadStartupProcessProtocol(d)
        reactor.spawnProcess(proto, sys.executable, ('python', progname), env)
        return d
# Skip the suites the installed reactor cannot support: the thread tests need
# IReactorThreads, and the startup test additionally spawns a child process
# and therefore needs IReactorProcess.
if interfaces.IReactorThreads(reactor, None) is None:
    for cls in (ReactorThreadsTestCase,
                DeferredResultTestCase,
                StartupBehaviorTestCase):
        cls.skip = "No thread support, nothing to test here."
else:
    import threading
if interfaces.IReactorProcess(reactor, None) is None:
    for cls in (StartupBehaviorTestCase,):
        cls.skip = "No process support, cannot run subprocess thread tests."
| gpl-2.0 |
TwoD/ansible | plugins/inventory/freeipa.py | 17 | 2084 | #!/usr/bin/env python
import argparse
from ipalib import api
import json
def initialize():
    '''
    This function initializes the FreeIPA/IPA API. This function requires
    no arguments. A kerberos key must be present in the users keyring in
    order for this to work.
    '''
    api.bootstrap(context='cli')
    api.finalize()
    # Connect the XML-RPC transport; this is where a missing/expired Kerberos
    # ticket surfaces as an error.
    api.Backend.xmlclient.connect()
    return api
def list_groups(api):
'''
This function returns a list of all host groups. This function requires
one argument, the FreeIPA/IPA API object.
'''
inventory = {}
hostvars={}
meta={}
result = api.Command.hostgroup_find()['result']
for hostgroup in result:
inventory[hostgroup['cn'][0]] = { 'hosts': [host for host in hostgroup['member_host']]}
for host in hostgroup['member_host']:
hostvars[host] = {}
inventory['_meta'] = {'hostvars': hostvars}
inv_string = json.dumps(inventory, indent=1, sort_keys=True)
print inv_string
return None
def parse_args():
    '''
    Build and evaluate the command line for this inventory script.
    Exactly one of --list / --host must be supplied; argparse enforces
    the mutual exclusion and the "required" constraint for us.
    '''
    parser = argparse.ArgumentParser(
        description='Ansible FreeIPA/IPA inventory module')
    mode = parser.add_mutually_exclusive_group(required=True)
    mode.add_argument('--list', action='store_true',
                      help='List active servers')
    mode.add_argument('--host', help='List details about the specified host')
    return parser.parse_args()
def print_host(host):
    '''
    This function is really a stub, it could return variables to be used in
    a playbook. However, at this point there are no variables stored in
    FreeIPA/IPA.
    This function expects one string, this hostname to lookup variables for.
    '''
    # Always an empty dict: this script exposes no per-host variables.
    print json.dumps({})
    return None
# Entry point: --host prints the (always empty) per-host variables without
# contacting the server; --list connects to FreeIPA and dumps the inventory.
if __name__ == '__main__':
    args = parse_args()
    if args.host:
        print_host(args.host)
    elif args.list:
        api = initialize()
        list_groups(api)
| gpl-3.0 |
mattf/kubernetes | hack/verify-flags-underscore.py | 83 | 8826 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import mmap
import os
import re
import sys
import argparse
# Command-line interface: an optional explicit file list (default: scan the
# whole tree) plus -e to ignore the checked-in exceptions list, which is used
# when regenerating that list.
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
parser.add_argument("-e", "--skip-exceptions", help="ignore hack/verify-flags/exceptions.txt and print all output", action="store_true")
args = parser.parse_args()
# Cargo culted from http://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python
def is_binary(pathname):
    """Return true if the given filename is binary.
    A file is treated as binary when it contains a NUL byte, or when it
    cannot be opened or read at all (missing, unreadable, undecodable) --
    such files are skipped by the caller rather than scanned.
    @attention: found @ http://bytes.com/topic/python/answers/21222-determine-file-type-binary-text on 6/08/2010
    @author: Trent Mick <TrentM@ActiveState.com>
    @author: Jorge Orpinel <jorge@orpinel.com>"""
    try:
        with open(pathname, 'r') as f:
            CHUNKSIZE = 1024
            while 1:
                chunk = f.read(CHUNKSIZE)
                if '\0' in chunk: # found null byte
                    return True
                if len(chunk) < CHUNKSIZE:
                    break # done
    # Catch Exception rather than using a bare except: a bare except would
    # also swallow KeyboardInterrupt/SystemExit, which must propagate.
    except Exception:
        return True
    return False
def get_all_files(rootdir):
    """Recursively collect every non-binary file under *rootdir*, skipping
    vendored trees (Godeps, _gopath, third_party, .git) and the flag
    bookkeeping files themselves."""
    all_files = []
    for root, dirs, files in os.walk(rootdir):
        # don't visit certain dirs
        # (mutating `dirs` in place is how os.walk is told to prune)
        if 'Godeps' in dirs:
            dirs.remove('Godeps')
        if '_gopath' in dirs:
            dirs.remove('_gopath')
        if 'third_party' in dirs:
            dirs.remove('third_party')
        if '.git' in dirs:
            dirs.remove('.git')
        if 'exceptions.txt' in files:
            files.remove('exceptions.txt')
        if 'known-flags.txt' in files:
            files.remove('known-flags.txt')
        for name in files:
            pathname = os.path.join(root, name)
            if is_binary(pathname):
                continue
            all_files.append(pathname)
    return all_files
def normalize_files(rootdir, files):
    """Drop vendored/doc/image files from *files* and anchor any relative
    paths at *rootdir*.  Mirrors the pruning done by get_all_files so an
    explicit file list behaves like a full scan."""
    skip_fragments = ('Godeps', '_gopath', 'third_party', '.git',
                      'exceptions.txt', 'known-flags.txt')
    skip_suffixes = ('.svg', '.gliffy', '.md', '.yaml')
    kept = [
        f for f in files
        if not any(fragment in f for fragment in skip_fragments)
        and not f.endswith(skip_suffixes)
    ]
    return [f if os.path.isabs(f) else os.path.join(rootdir, f) for f in kept]
def line_has_bad_flag(line, flagre):
    """Return True when *line* uses a known flag spelled with underscores.
    *flagre* (built by flags_to_re) matches every known flag in both its
    dash and underscore spellings; only an underscore match that is not a
    recognized template-variable usage counts as bad.
    """
    results = flagre.findall(line)
    for result in results:
        if not "_" in result:
            # A dash spelling is fine, but keep scanning: the same line may
            # also contain an underscore spelling of another flag.
            # (Previously this returned False here, masking later matches.)
            continue
        # this should exclude many cases where jinja2 templates use kube flags
        # as variables, except it uses _ for the variable name
        if "{% set" + result + "= \"" in line:
            return False
        if "pillar[" + result + "]" in line:
            return False
        if "grains" + result in line:
            return False
        # something common in juju variables...
        if "template_data[" + result + "]" in line:
            return False
        return True
    return False
# The list of files might not be the whole repo. If someone only changed a
# couple of files we don't want to run all of the golang files looking for
# flags. Instead load the list of flags from hack/verify-flags/known-flags.txt
# If running the golang files finds a new flag not in that file, return an
# error and tell the user to add the flag to the flag list.
def get_flags(rootdir, files):
    """Return the list of known flag names.
    Loads hack/verify-flags/known-flags.txt and excluded-flags.txt, then
    scans the given .go files for flag declarations.  If a declaration is
    found that is missing from either list, prints instructions and exits
    non-zero so the developer updates the checked-in lists.
    """
    # preload the 'known' flags
    pathname = os.path.join(rootdir, "hack/verify-flags/known-flags.txt")
    f = open(pathname, 'r')
    flags = set(f.read().splitlines())
    f.close()
    # preload the 'known' flags which don't follow the - standard
    pathname = os.path.join(rootdir, "hack/verify-flags/excluded-flags.txt")
    f = open(pathname, 'r')
    excluded_flags = set(f.read().splitlines())
    f.close()
    # Patterns for the pflag declaration styles used in the tree; group 1
    # captures the flag name.
    regexs = [ re.compile('Var[P]?\([^,]*, "([^"]*)"'),
               re.compile('.String[P]?\("([^"]*)",[^,]+,[^)]+\)'),
               re.compile('.Int[P]?\("([^"]*)",[^,]+,[^)]+\)'),
               re.compile('.Bool[P]?\("([^"]*)",[^,]+,[^)]+\)'),
               re.compile('.Duration[P]?\("([^"]*)",[^,]+,[^)]+\)'),
               re.compile('.StringSlice[P]?\("([^"]*)",[^,]+,[^)]+\)') ]
    new_flags = set()
    new_excluded_flags = set()
    # walk all the files looking for any flags being declared
    for pathname in files:
        if not pathname.endswith(".go"):
            continue
        f = open(pathname, 'r')
        data = f.read()
        f.close()
        matches = []
        for regex in regexs:
            matches = matches + regex.findall(data)
        for flag in matches:
            if any(x in flag for x in excluded_flags):
                continue
            if "_" in flag:
                new_excluded_flags.add(flag)
            if not "-" in flag:
                continue
            if flag not in flags:
                new_flags.add(flag)
    if len(new_excluded_flags) != 0:
        print("Found a flag declared with an _ but which is not explicitly listed as a valid flag name in hack/verify-flags/excluded-flags.txt")
        print("Are you certain this flag should not have been declared with an - instead?")
        l = list(new_excluded_flags)
        l.sort()
        print("%s" % "\n".join(l))
        sys.exit(1)
    if len(new_flags) != 0:
        print("Found flags in golang files not in the list of known flags. Please add these to hack/verify-flags/known-flags.txt")
        l = list(new_flags)
        l.sort()
        print("%s" % "\n".join(l))
        sys.exit(1)
    return list(flags)
def flags_to_re(flags):
    """turn the list of all flags we found into a regex find both - and _ versions"""
    # Collapse every '-' or '_' in a flag name into the character class [-_]
    # and require a non-word (and non-template: $ or {) boundary on the left
    # plus a non-word boundary on the right, so substrings of longer
    # identifiers never match.
    separator = re.compile('[-_]')
    patterns = [
        "[^\w${]" + separator.sub('[-_]', flag) + "[^\w]"
        for flag in flags
    ]
    return re.compile("|".join(patterns))
def load_exceptions(rootdir):
    """Read hack/verify-flags/exceptions.txt into a set of
    (relative filename, exact line text) tuples.  Returns an empty set when
    -e/--skip-exceptions was given, so every hit is reported."""
    exceptions = set()
    if args.skip_exceptions:
        return exceptions
    exception_filename = os.path.join(rootdir, "hack/verify-flags/exceptions.txt")
    exception_file = open(exception_filename, 'r')
    for exception in exception_file.read().splitlines():
        # Each entry is "filename:line text"; split only on the first colon
        # because the line text may itself contain colons.
        out = exception.split(":", 1)
        if len(out) != 2:
            print("Invalid line in exceptions file: %s" % exception)
            continue
        filename = out[0]
        line = out[1]
        exceptions.add((filename, line))
    return exceptions
def main():
    """Scan the tree (or the files given on the command line) for lines that
    spell a known flag with underscores.
    Returns 1 after printing a report when offending, non-excepted lines are
    found; otherwise falls off the end returning None, which sys.exit treats
    as success.
    """
    # This script lives in hack/, so the repo root is one level up.
    rootdir = os.path.dirname(__file__) + "/../"
    rootdir = os.path.abspath(rootdir)
    exceptions = load_exceptions(rootdir)
    if len(args.filenames) > 0:
        files = args.filenames
    else:
        files = get_all_files(rootdir)
    files = normalize_files(rootdir, files)
    flags = get_flags(rootdir, files)
    flagRE = flags_to_re(flags)
    bad_lines = []
    # walk all the file looking for any flag that was declared and now has an _
    for pathname in files:
        relname = os.path.relpath(pathname, rootdir)
        f = open(pathname, 'r')
        for line in f.read().splitlines():
            if line_has_bad_flag(line, flagRE):
                if (relname, line) not in exceptions:
                    bad_lines.append((relname, line))
        f.close()
    if len(bad_lines) != 0:
        if not args.skip_exceptions:
            print("Found illegal 'flag' usage. If these are false positives you should run `hack/verify-flags-underscore.py -e > hack/verify-flags/exceptions.txt` to update the list.")
        bad_lines.sort()
        for (relname, line) in bad_lines:
            print("%s:%s" % (relname, line))
        return 1
if __name__ == "__main__":
    sys.exit(main())
| apache-2.0 |
sserrot/champion_relationships | venv/Lib/site-packages/ipywidgets/widgets/interaction.py | 1 | 20907 | # Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""Interact with functions using widgets."""
from __future__ import print_function
from __future__ import division
try: # Python >= 3.3
from inspect import signature, Parameter
except ImportError:
from IPython.utils.signatures import signature, Parameter
from inspect import getcallargs
try:
from inspect import getfullargspec as check_argspec
except ImportError:
from inspect import getargspec as check_argspec # py2
import sys
from IPython.core.getipython import get_ipython
from . import (ValueWidget, Text,
FloatSlider, IntSlider, Checkbox, Dropdown,
VBox, Button, DOMWidget, Output)
from IPython.display import display, clear_output
from ipython_genutils.py3compat import string_types, unicode_type
from traitlets import HasTraits, Any, Unicode, observe
from numbers import Real, Integral
from warnings import warn
try:
from collections.abc import Iterable, Mapping
except ImportError:
from collections import Iterable, Mapping # py2
empty = Parameter.empty
def show_inline_matplotlib_plots():
    """Show matplotlib plots immediately if using the inline backend.
    With ipywidgets 6.0, matplotlib plots don't work well with interact when
    using the inline backend that comes with ipykernel. Basically, the inline
    backend only shows the plot after the entire cell executes, which does not
    play well with drawing plots inside of an interact function. See
    https://github.com/jupyter-widgets/ipywidgets/issues/1181/ and
    https://github.com/ipython/ipython/issues/10376 for more details. This
    function displays any matplotlib plots if the backend is the inline backend.
    """
    if 'matplotlib' not in sys.modules:
        # matplotlib hasn't been imported, nothing to do.
        return
    try:
        import matplotlib as mpl
        from ipykernel.pylab.backend_inline import flush_figures
    except ImportError:
        return
    # Only flush when the inline backend is actually active; other backends
    # manage their own drawing.
    if mpl.get_backend() == 'module://ipykernel.pylab.backend_inline':
        flush_figures()
def interactive_output(f, controls):
    """Connect widget controls to a function.
    This function does not generate a user interface for the widgets (unlike `interact`).
    This enables customisation of the widget user interface layout.
    The user interface layout must be defined and displayed manually.
    """
    out = Output()
    def observer(change):
        # Re-run f with the current value of every control, rendering into
        # the Output widget (clearing the previous result first).
        kwargs = {k:v.value for k,v in controls.items()}
        show_inline_matplotlib_plots()
        with out:
            clear_output(wait=True)
            f(**kwargs)
            show_inline_matplotlib_plots()
    for k,w in controls.items():
        w.observe(observer, 'value')
    show_inline_matplotlib_plots()
    # Render once immediately with the initial control values.
    observer(None)
    return out
def _matches(o, pattern):
"""Match a pattern of types in a sequence."""
if not len(o) == len(pattern):
return False
comps = zip(o,pattern)
return all(isinstance(obj,kind) for obj,kind in comps)
def _get_min_max_value(min, max, value=None, step=None):
"""Return min, max, value given input values with possible None."""
# Either min and max need to be given, or value needs to be given
if value is None:
if min is None or max is None:
raise ValueError('unable to infer range, value from: ({0}, {1}, {2})'.format(min, max, value))
diff = max - min
value = min + (diff / 2)
# Ensure that value has the same type as diff
if not isinstance(value, type(diff)):
value = min + (diff // 2)
else: # value is not None
if not isinstance(value, Real):
raise TypeError('expected a real number, got: %r' % value)
# Infer min/max from value
if value == 0:
# This gives (0, 1) of the correct type
vrange = (value, value + 1)
elif value > 0:
vrange = (-value, 3*value)
else:
vrange = (3*value, -value)
if min is None:
min = vrange[0]
if max is None:
max = vrange[1]
if step is not None:
# ensure value is on a step
tick = int((value - min) / step)
value = min + tick * step
if not min <= value <= max:
raise ValueError('value must be between min and max (min={0}, value={1}, max={2})'.format(min, value, max))
return min, max, value
def _yield_abbreviations_for_parameter(param, kwargs):
    """Get an abbreviation for a function parameter.
    Yields (name, abbreviation, default) triples; an abbreviation of
    ``empty`` signals to the caller that nothing usable was found for the
    parameter.  Consumed entries are popped from *kwargs* as a side effect.
    """
    name = param.name
    kind = param.kind
    ann = param.annotation
    default = param.default
    not_found = (name, empty, empty)
    if kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY):
        if name in kwargs:
            # An explicit abbreviation passed by the caller wins.
            value = kwargs.pop(name)
        elif ann is not empty:
            warn("Using function annotations to implicitly specify interactive controls is deprecated. Use an explicit keyword argument for the parameter instead.", DeprecationWarning)
            value = ann
        elif default is not empty:
            value = default
        else:
            # NOTE(review): the caller is expected to stop iterating after
            # this yield (it raises on an `empty` abbreviation); resuming
            # would reach the next yield with `value` unbound.
            yield not_found
        yield (name, value, default)
    elif kind == Parameter.VAR_KEYWORD:
        # In this case name=kwargs and we yield the items in kwargs with their keys.
        for k, v in kwargs.copy().items():
            kwargs.pop(k)
            yield k, v, empty
class interactive(VBox):
    """
    A VBox container containing a group of interactive widgets tied to a
    function.
    Parameters
    ----------
    __interact_f : function
        The function to which the interactive widgets are tied. The `**kwargs`
        should match the function signature.
    __options : dict
        A dict of options. Currently, the only supported keys are
        ``"manual"`` and ``"manual_name"``.
    **kwargs : various, optional
        An interactive widget is created for each keyword argument that is a
        valid widget abbreviation.
    Note that the first two parameters intentionally start with a double
    underscore to avoid being mixed up with keyword arguments passed by
    ``**kwargs``.
    """
    def __init__(self, __interact_f, __options={}, **kwargs):
        VBox.__init__(self, _dom_classes=['widget-interact'])
        self.result = None
        # NOTE(review): self.args is never read in this class; it appears
        # vestigial (positional arguments are not supported).
        self.args = []
        self.kwargs = {}
        self.f = f = __interact_f
        self.clear_output = kwargs.pop('clear_output', True)
        self.manual = __options.get("manual", False)
        self.manual_name = __options.get("manual_name", "Run Interact")
        self.auto_display = __options.get("auto_display", False)
        new_kwargs = self.find_abbreviations(kwargs)
        # Before we proceed, let's make sure that the user has passed a set of args+kwargs
        # that will lead to a valid call of the function. This protects against unspecified
        # and doubly-specified arguments.
        try:
            check_argspec(f)
        except TypeError:
            # if we can't inspect, we can't validate
            pass
        else:
            getcallargs(f, **{n:v for n,v,_ in new_kwargs})
        # Now build the widgets from the abbreviations.
        self.kwargs_widgets = self.widgets_from_abbreviations(new_kwargs)
        # This has to be done as an assignment, not using self.children.append,
        # so that traitlets notices the update. We skip any objects (such as fixed) that
        # are not DOMWidgets.
        c = [w for w in self.kwargs_widgets if isinstance(w, DOMWidget)]
        # If we are only to run the function on demand, add a button to request this.
        if self.manual:
            self.manual_button = Button(description=self.manual_name)
            c.append(self.manual_button)
        self.out = Output()
        c.append(self.out)
        self.children = c
        # Wire up the widgets
        # If we are doing manual running, the callback is only triggered by the button
        # Otherwise, it is triggered for every trait change received
        # On-demand running also suppresses running the function with the initial parameters
        if self.manual:
            self.manual_button.on_click(self.update)
            # Also register input handlers on text areas, so the user can hit return to
            # invoke execution.
            for w in self.kwargs_widgets:
                if isinstance(w, Text):
                    w.on_submit(self.update)
        else:
            for widget in self.kwargs_widgets:
                widget.observe(self.update, names='value')
        self.on_displayed(self.update)
    # Callback function
    def update(self, *args):
        """
        Call the interact function and update the output widget with
        the result of the function call.
        Parameters
        ----------
        *args : ignored
            Required for this method to be used as traitlets callback.
        """
        self.kwargs = {}
        if self.manual:
            self.manual_button.disabled = True
        try:
            show_inline_matplotlib_plots()
            with self.out:
                if self.clear_output:
                    clear_output(wait=True)
                for widget in self.kwargs_widgets:
                    value = widget.get_interact_value()
                    self.kwargs[widget._kwarg] = value
                self.result = self.f(**self.kwargs)
                show_inline_matplotlib_plots()
                if self.auto_display and self.result is not None:
                    display(self.result)
        except Exception as e:
            # Show the traceback in the notebook when IPython is available;
            # otherwise fall back to the widget log.
            ip = get_ipython()
            if ip is None:
                self.log.warn("Exception in interact callback: %s", e, exc_info=True)
            else:
                ip.showtraceback()
        finally:
            if self.manual:
                self.manual_button.disabled = False
    # Find abbreviations
    def signature(self):
        return signature(self.f)
    def find_abbreviations(self, kwargs):
        """Find the abbreviations for the given function and kwargs.
        Return (name, abbrev, default) tuples.
        """
        new_kwargs = []
        try:
            sig = self.signature()
        except (ValueError, TypeError):
            # can't inspect, no info from function; only use kwargs
            return [ (key, value, value) for key, value in kwargs.items() ]
        for param in sig.parameters.values():
            for name, value, default in _yield_abbreviations_for_parameter(param, kwargs):
                if value is empty:
                    raise ValueError('cannot find widget or abbreviation for argument: {!r}'.format(name))
                new_kwargs.append((name, value, default))
        return new_kwargs
    # Abbreviations to widgets
    def widgets_from_abbreviations(self, seq):
        """Given a sequence of (name, abbrev, default) tuples, return a sequence of Widgets."""
        result = []
        for name, abbrev, default in seq:
            widget = self.widget_from_abbrev(abbrev, default)
            if not (isinstance(widget, ValueWidget) or isinstance(widget, fixed)):
                if widget is None:
                    raise ValueError("{!r} cannot be transformed to a widget".format(abbrev))
                else:
                    raise TypeError("{!r} is not a ValueWidget".format(widget))
            if not widget.description:
                widget.description = name
            widget._kwarg = name
            result.append(widget)
        return result
    @classmethod
    def widget_from_abbrev(cls, abbrev, default=empty):
        """Build a ValueWidget instance given an abbreviation or Widget."""
        if isinstance(abbrev, ValueWidget) or isinstance(abbrev, fixed):
            return abbrev
        if isinstance(abbrev, tuple):
            widget = cls.widget_from_tuple(abbrev)
            if default is not empty:
                try:
                    widget.value = default
                except Exception:
                    # ignore failure to set default
                    pass
            return widget
        # Try single value
        widget = cls.widget_from_single_value(abbrev)
        if widget is not None:
            return widget
        # Something iterable (list, dict, generator, ...). Note that str and
        # tuple should be handled before, that is why we check this case last.
        if isinstance(abbrev, Iterable):
            widget = cls.widget_from_iterable(abbrev)
            if default is not empty:
                try:
                    widget.value = default
                except Exception:
                    # ignore failure to set default
                    pass
            return widget
        # No idea...
        return None
    @staticmethod
    def widget_from_single_value(o):
        """Make widgets from single values, which can be used as parameter defaults."""
        if isinstance(o, string_types):
            return Text(value=unicode_type(o))
        elif isinstance(o, bool):
            return Checkbox(value=o)
        elif isinstance(o, Integral):
            min, max, value = _get_min_max_value(None, None, o)
            return IntSlider(value=o, min=min, max=max)
        elif isinstance(o, Real):
            min, max, value = _get_min_max_value(None, None, o)
            return FloatSlider(value=o, min=min, max=max)
        else:
            return None
    @staticmethod
    def widget_from_tuple(o):
        """Make widgets from a tuple abbreviation."""
        # NOTE(review): tuples matching neither (Real, Real) nor
        # (Real, Real, Real) fall through and return None implicitly.
        if _matches(o, (Real, Real)):
            min, max, value = _get_min_max_value(o[0], o[1])
            if all(isinstance(_, Integral) for _ in o):
                cls = IntSlider
            else:
                cls = FloatSlider
            return cls(value=value, min=min, max=max)
        elif _matches(o, (Real, Real, Real)):
            step = o[2]
            if step <= 0:
                raise ValueError("step must be >= 0, not %r" % step)
            min, max, value = _get_min_max_value(o[0], o[1], step=step)
            if all(isinstance(_, Integral) for _ in o):
                cls = IntSlider
            else:
                cls = FloatSlider
            return cls(value=value, min=min, max=max, step=step)
    @staticmethod
    def widget_from_iterable(o):
        """Make widgets from an iterable. This should not be done for
        a string or tuple."""
        # Dropdown expects a dict or list, so we convert an arbitrary
        # iterable to either of those.
        if isinstance(o, (list, dict)):
            return Dropdown(options=o)
        elif isinstance(o, Mapping):
            return Dropdown(options=list(o.items()))
        else:
            return Dropdown(options=list(o))
    # Return a factory for interactive functions
    @classmethod
    def factory(cls):
        options = dict(manual=False, auto_display=True, manual_name="Run Interact")
        return _InteractFactory(cls, options)
class _InteractFactory(object):
    """
    Factory for instances of :class:`interactive`.
    This class is needed to support options like::
        >>> @interact.options(manual=True)
        ... def greeting(text="World"):
        ...     print("Hello {}".format(text))
    Parameters
    ----------
    cls : class
        The subclass of :class:`interactive` to construct.
    options : dict
        A dict of options used to construct the interactive
        function. By default, this is returned by
        ``cls.default_options()``.
    kwargs : dict
        A dict of **kwargs to use for widgets.
    """
    def __init__(self, cls, options, kwargs={}):
        # NOTE(review): the mutable default {} is safe only because the dict
        # is never mutated in place (copies are taken in __call__/options).
        self.cls = cls
        self.opts = options
        self.kwargs = kwargs
    def widget(self, f):
        """
        Return an interactive function widget for the given function.
        The widget is only constructed, not displayed nor attached to
        the function.
        Returns
        -------
        An instance of ``self.cls`` (typically :class:`interactive`).
        Parameters
        ----------
        f : function
            The function to which the interactive widgets are tied.
        """
        return self.cls(f, self.opts, **self.kwargs)
    def __call__(self, __interact_f=None, **kwargs):
        """
        Make the given function interactive by adding and displaying
        the corresponding :class:`interactive` widget.
        Expects the first argument to be a function. Parameters to this
        function are widget abbreviations passed in as keyword arguments
        (``**kwargs``). Can be used as a decorator (see examples).
        Returns
        -------
        f : __interact_f with interactive widget attached to it.
        Parameters
        ----------
        __interact_f : function
            The function to which the interactive widgets are tied. The `**kwargs`
            should match the function signature. Passed to :func:`interactive()`
        **kwargs : various, optional
            An interactive widget is created for each keyword argument that is a
            valid widget abbreviation. Passed to :func:`interactive()`
        Examples
        --------
        Render an interactive text field that shows the greeting with the passed in
        text::
            # 1. Using interact as a function
            def greeting(text="World"):
                print("Hello {}".format(text))
            interact(greeting, text="IPython Widgets")
            # 2. Using interact as a decorator
            @interact
            def greeting(text="World"):
                print("Hello {}".format(text))
            # 3. Using interact as a decorator with named parameters
            @interact(text="IPython Widgets")
            def greeting(text="World"):
                print("Hello {}".format(text))
        Render an interactive slider widget and prints square of number::
            # 1. Using interact as a function
            def square(num=1):
                print("{} squared is {}".format(num, num*num))
            interact(square, num=5)
            # 2. Using interact as a decorator
            @interact
            def square(num=2):
                print("{} squared is {}".format(num, num*num))
            # 3. Using interact as a decorator with named parameters
            @interact(num=5)
            def square(num=2):
                print("{} squared is {}".format(num, num*num))
        """
        # If kwargs are given, replace self by a new
        # _InteractFactory with the updated kwargs
        if kwargs:
            kw = dict(self.kwargs)
            kw.update(kwargs)
            self = type(self)(self.cls, self.opts, kw)
        f = __interact_f
        if f is None:
            # This branch handles the case 3
            # @interact(a=30, b=40)
            # def f(*args, **kwargs):
            #     ...
            #
            # Simply return the new factory
            return self
        # positional arg support in: https://gist.github.com/8851331
        # Handle the cases 1 and 2
        # 1. interact(f, **kwargs)
        # 2. @interact
        #    def f(*args, **kwargs):
        #        ...
        w = self.widget(f)
        try:
            f.widget = w
        except AttributeError:
            # some things (instancemethods) can't have attributes attached,
            # so wrap in a lambda
            f = lambda *args, **kwargs: __interact_f(*args, **kwargs)
            f.widget = w
        show_inline_matplotlib_plots()
        display(w)
        return f
    def options(self, **kwds):
        """
        Change options for interactive functions.
        Returns
        -------
        A new :class:`_InteractFactory` which will apply the
        options when called.
        """
        opts = dict(self.opts)
        for k in kwds:
            try:
                # Ensure that the key exists because we want to change
                # existing options, not add new ones.
                _ = opts[k]
            except KeyError:
                raise ValueError("invalid option {!r}".format(k))
            opts[k] = kwds[k]
        return type(self)(self.cls, opts, self.kwargs)
# Public entry points: the default auto-running interact, and a variant that
# only runs the function when its button is pressed.
interact = interactive.factory()
interact_manual = interact.options(manual=True, manual_name="Run Interact")
class fixed(HasTraits):
    """A pseudo-widget whose value is fixed and never synced to the client."""
    # The wrapped constant passed through to the interactive function.
    value = Any(help="Any Python object")
    # NOTE(review): this help text looks copy-pasted from `value`; the trait
    # itself mirrors the widget `description` interface.
    description = Unicode('', help="Any Python object")
    def __init__(self, value, **kwargs):
        super(fixed, self).__init__(value=value, **kwargs)
    def get_interact_value(self):
        """Return the value for this widget which should be passed to
        interactive functions. Custom widgets can change this method
        to process the raw value ``self.value``.
        """
        return self.value
| mit |
synicalsyntax/zulip | analytics/tests/test_fixtures.py | 7 | 1528 | from analytics.lib.counts import CountStat
from analytics.lib.fixtures import generate_time_series_data
from zerver.lib.test_classes import ZulipTestCase
# A very light test suite; the code being tested is not run in production.
class TestFixtures(ZulipTestCase):
    """Sanity checks for the deterministic paths of
    generate_time_series_data (spikiness disabled)."""
    def test_deterministic_settings(self) -> None:
        # test basic business_hour / non_business_hour calculation
        # test we get an array of the right length with frequency=CountStat.DAY
        data = generate_time_series_data(
            days=7, business_hours_base=20, non_business_hours_base=15, spikiness=0)
        self.assertEqual(data, [400, 400, 400, 400, 400, 360, 360])
        data = generate_time_series_data(
            days=1, business_hours_base=2000, non_business_hours_base=1500,
            growth=2, spikiness=0, frequency=CountStat.HOUR)
        # test we get an array of the right length with frequency=CountStat.HOUR
        self.assertEqual(len(data), 24)
        # test that growth doesn't affect the first data point
        self.assertEqual(data[0], 2000)
        # test that the last data point is growth times what it otherwise would be
        self.assertEqual(data[-1], 1500*2)
        # test autocorrelation == 1, since that's the easiest value to test
        data = generate_time_series_data(
            days=1, business_hours_base=2000, non_business_hours_base=2000,
            autocorrelation=1, frequency=CountStat.HOUR)
        self.assertEqual(data[0], data[1])
        self.assertEqual(data[0], data[-1])
Pajn/RAXA-Django | automation/widgets.py | 1 | 6578 | '''
Copyright (C) 2013 Rasmus Eneman <rasmus@eneman.eu>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from django.core.urlresolvers import reverse
from string import Template
from django.forms import Widget
from django.utils.safestring import mark_safe
class DeviceActionWidget(Widget):
    """Django form widget that renders a hidden value field plus a script
    which loads a device-specific action sub-widget via AJAX whenever the
    selected device (#id_action_object) changes."""
    def __init__(self, value=None, *args, **kwargs):
        super(DeviceActionWidget, self).__init__(*args, **kwargs)
        self.value = value
    def render(self, name, value, attrs=None):
        # string.Template with safe_substitute: unknown $-placeholders inside
        # the embedded jQuery (e.g. $('#...')) are left untouched.
        tpl = Template('''
            <input type="hidden" id="id_$name" name="$name" value="$value" />
            <div id="id_div_$name"></div>
            <script>
                $('#id_action_object').on('change', function() {
                    get_action_widget();
                });
                function get_action_widget() {
                    var id = $('#id_action_object').val();
                    var value = $('#id_$name').val();
                    if (id == '') {
                        $('#id_div_$name').html('');
                        return false;
                    }
                    $('#id_div_$name').load('$url', {'device': id, 'value': value}, function () {
                        fix_sliders();
                    });
                }
                get_action_widget();
                function fix_sliders() {
                    $('input[type="range"]').each(function (index, input) {
                        var $input, $label, $slider, $container;
                        $input = $(input);
                        $container = $('<div class="slider" style="width: 195px"/>');
                        $label = $('<span class="label ui-widget-content ui-corner-all">' + parseInt($input.attr('value'), 10) + '</span>');
                        //Create a new div, turn it into a slider, and set its attributes based on
                        //the attributes of the input.
                        $slider = $('<div style="float: right;width: 150px"/>').slider({
                            min: parseInt($input.attr('min'), 10),
                            max: parseInt($input.attr('max'), 10),
                            value: parseInt($input.attr('value'), 10),
                            step: parseInt($input.attr('step'), 10),
                            slide: function (event, ui) {
                                //Keep the value of the input[type=range] in sync with the slider.
                                //$(this).prev('input').val(ui.value);
                                $input.val(ui.value);
                                $input.change();
                                $label.text(ui.value);
                            }
                        });
                        //Append the slider after the input and hide the input. The user will only
                        //interact with the slider.
                        $input.hide();
                        $input.after($container);
                        $container.append($label);
                        $label.after($slider);
                    });
                }
            </script>''')
        return mark_safe(tpl.safe_substitute(name=name, value=value, url=reverse('desktop.views.widget_action')))
class ThermometerHelperWidget(Widget):
    """Invisible helper widget: emits only a <script> block that shows or
    hides the temperature/start/end fields based on the selected
    ``trigger`` radio value (0 = none, 1-2 = temperature, 3 = start/end).
    """
    # Hidden so the form layout does not allocate a visible row for it.
    is_hidden = True
    def render(self, name, value, attrs=None):
        # Static script; nothing is substituted, so a plain literal suffices.
        return mark_safe('''
<script>
    $('input[name=trigger]').on('change', function() {
        show_hide_fields();
    });
    function show_hide_fields() {
        var val = parseInt($('input[name=trigger]:checked').val());
        switch(val) {
            case 0:
                $('#id_temperature').parent().hide();
                $('#id_start').parent().hide();
                $('#id_end').parent().hide();
                break;
            case 1:
            case 2:
                $('#id_temperature').parent().show();
                $('#id_start').parent().hide();
                $('#id_end').parent().hide();
                break;
            case 3:
                $('#id_temperature').parent().hide();
                $('#id_start').parent().show();
                $('#id_end').parent().show();
                break;
        }
    }
    show_hide_fields();
</script>''')
class CounterHelperWidget(Widget):
    """Invisible helper widget: emits a <script> block that shows or hides
    the value/start/end fields based on the selected ``trigger`` radio
    value (0-2 = value, 3 = start/end range, 4-6 = none).
    """
    # Hidden so the form layout does not allocate a visible row for it.
    is_hidden = True
    def render(self, name, value, attrs=None):
        # Static script; nothing is substituted, so a plain literal suffices.
        return mark_safe('''
<script>
    $('input[name=trigger]').on('change', function() {
        show_hide_fields();
    });
    function show_hide_fields() {
        var val = parseInt($('input[name=trigger]:checked').val());
        switch(val) {
            case 0:
            case 1:
            case 2:
                $('#id_value').parent().show();
                $('#id_start').parent().hide();
                $('#id_end').parent().hide();
                break;
            case 3:
                $('#id_value').parent().hide();
                $('#id_start').parent().show();
                $('#id_end').parent().show();
                break;
            case 4:
            case 5:
            case 6:
                $('#id_value').parent().hide();
                $('#id_start').parent().hide();
                $('#id_end').parent().hide();
                break;
        }
    }
    show_hide_fields();
</script>''') | agpl-3.0 |
Ziqi-Li/bknqgis | bokeh/examples/plotting/file/sizing_mode.py | 9 | 1107 | import numpy as np
# Bokeh "sizing_mode" demo: a random scatter plot plus a Select widget that
# switches the layout's sizing mode client-side through a CustomJS callback.
from bokeh.plotting import figure, show, output_file
from bokeh.layouts import column
from bokeh.models import CustomJS
from bokeh.models.widgets import Select
from bokeh.core.enums import SizingMode
# N random points; radii and blue-tinted hex colors are derived from the
# coordinates themselves.
N = 4000
x = np.random.random(size=N) * 100
y = np.random.random(size=N) * 100
radii = np.random.random(size=N) * 1.5
colors = [
    "#%02x%02x%02x" % (int(r), int(g), 150) for r, g in zip(50+2*x, 30+2*y)
]
TOOLS="hover,crosshair,pan,wheel_zoom,zoom_in,zoom_out,box_zoom,undo,redo,reset,tap,save,box_select,poly_select,lasso_select"
sizing_mode = "fixed"
# The dropdown offers every legal SizingMode enum value.
select = Select(title="Sizing mode", value=sizing_mode, options=list(SizingMode))
plot = figure(tools=TOOLS)
plot.scatter(x, y, radius=radii, fill_color=colors, fill_alpha=0.6, line_color=None)
layout = column(select, plot, sizing_mode=sizing_mode)
# Changing the dropdown updates both the layout's and the plot's sizing mode
# in the browser, with no server round-trip.
select.js_on_change('value', CustomJS(args=dict(layout=layout, plot=plot), code="""
var sizing_mode = this.value;
layout.sizing_mode = sizing_mode;
plot.sizing_mode = sizing_mode;
"""))
output_file("sizing_mode.html", title="sizing_mode.py example")
show(layout)
| gpl-2.0 |
herloct/FrameworkBenchmarks | frameworks/Python/tornado/server.py | 21 | 4283 | #!/usr/bin/env python
import sys
import json
from random import randint
import momoko
import motor
import tornado.ioloop
import tornado.web
from tornado import gen
import tornado.options
from tornado.options import options
import tornado.httpserver
# Py2/Py3 compatibility shim: Python 3 has no xrange, so alias it to range
# for the query handlers below.
PY3 = False
if sys.version_info[0] == 3:
    PY3 = True
    xrange = range
# Command-line options (parsed in __main__): HTTP port plus the MongoDB
# host and the optional PostgreSQL host that selects the raw-SQL backend.
tornado.options.define('port', default=8888, type=int, help="Server port")
tornado.options.define('mongo', default='localhost', type=str, help="MongoDB host")
tornado.options.define('postgres', default=None, type=str, help="PostgreSQL host")
class BaseHandler(tornado.web.RequestHandler):
    """Common base for all benchmark handlers."""
    def compute_etag(self):
        # Disable ETag generation: the benchmark responses must not carry
        # an ETag header, and skipping the hash saves per-request work.
        return None
class JsonSerializeTestHandler(BaseHandler):
    """Benchmark endpoint (/json): serialize a fixed dict to JSON."""

    def get(self):
        # Tornado serializes a dict argument to JSON and sets the
        # application/json content type automatically.
        self.write({"message": "Hello, World!"})
class PlaintextHandler(BaseHandler):
    """Benchmark endpoint (/plaintext): fixed plain-text body."""
    def get(self):
        # Override Tornado's default text/html content type.
        self.set_header('Content-Type', 'text/plain')
        self.write(b"Hello, World!")
class DBTestHandler(BaseHandler):
    """Benchmark endpoint (/db): fetch one random World row from MongoDB.

    ``db`` is the module-level Motor client created in ``__main__`` (only
    when --postgres is not given).
    """
    @gen.coroutine
    def get(self):
        world = yield db.World.find_one(randint(1, 10000))
        # Rename Mongo's _id to the "id" field the benchmark spec expects,
        # coercing both numeric fields to int.
        world['id'] = int(world.pop('_id'))
        world['randomNumber'] = int(world['randomNumber'])
        response = json.dumps(world)
        self.set_header("Content-Type", "application/json; charset=UTF-8")
        self.write(response)
class QueryTestHandler(BaseHandler):
    """Benchmark endpoint (/queries): fetch N random World rows (MongoDB).

    Per the benchmark rules, ``queries`` is clamped to [1, 500]; a missing
    or unparsable value falls back to 1.
    """
    @gen.coroutine
    def get(self):
        try:
            queries = int(self.get_argument("queries"))
        except Exception:
            queries = 1
        else:
            if queries < 1:
                queries = 1
            elif queries > 500:
                queries = 500
        # Issue all lookups concurrently; yielding the list waits for all.
        worlds = yield [db.World.find_one(randint(1, 10000))
                        for _ in xrange(queries)]
        for world in worlds:
            # Rename Mongo's _id to "id" and coerce fields to int.
            world['id'] = int(world.pop('_id'))
            world['randomNumber'] = int(world['randomNumber'])
        response = json.dumps(worlds)
        self.set_header("Content-Type", "application/json; charset=UTF-8")
        self.write(response)
class QueryPostgresRawTestHandler(BaseHandler):
    """Benchmark endpoint (/dbraw): one random World row via raw SQL."""
    @gen.coroutine
    def get(self):
        # Parameterized query through the momoko (psycopg2) pool attached
        # to the application in __main__ when --postgres is given.
        sql = "SELECT id, randomNumber FROM World WHERE id=%s"
        random_id = randint(1, 10000)
        cursor = yield self.application.db.execute(sql, (random_id,))
        row = cursor.fetchone()
        response = json.dumps({"id": row[0], "randomNumber": row[1]})
        self.set_header("Content-Type", "application/json; charset=UTF-8")
        self.write(response)
class MultipleQueriesPostgresRawTestHandler(BaseHandler):
    """Benchmark endpoint (/queriesraw): N random World rows via raw SQL."""
    @gen.coroutine
    def get(self):
        queries = self.get_argument("queries", "1")
        try:
            queries = int(queries.strip())
        except ValueError:
            queries = 1
        # Clamp to the benchmark-mandated range [1, 500].
        queries = min(max(1, queries), 500)
        sql = "SELECT id, randomNumber FROM World WHERE id=%s"
        # Fire all queries concurrently and wait for every cursor.
        cursors = yield [self.application.db.execute(sql, (randint(1, 10000),)) for _ in xrange(int(queries))]
        rows = [cursor.fetchone() for cursor in cursors]
        worlds = [{"id": row[0], "randomNumber":row[1]} for row in rows]
        response = json.dumps(worlds)
        self.set_header("Content-Type", "application/json; charset=UTF-8")
        self.write(response)
# URL routing table: one handler per benchmark endpoint.
application = tornado.web.Application([
    (r"/json", JsonSerializeTestHandler),
    (r"/plaintext", PlaintextHandler),
    (r"/db", DBTestHandler),
    (r"/queries", QueryTestHandler),
    (r"/dbraw", QueryPostgresRawTestHandler),
    (r"/queriesraw", MultipleQueriesPostgresRawTestHandler)
])
if __name__ == "__main__":
    tornado.options.parse_command_line()
    server = tornado.httpserver.HTTPServer(application)
    server.bind(options.port)
    # start(0) forks one server process per CPU core.
    server.start(0)
    ioloop = tornado.ioloop.IOLoop.instance()
    if options.postgres:
        # Raw-SQL handlers read the connection pool from application.db.
        dsn = "user=benchmarkdbuser password=benchmarkdbpass dbname=hello_world host=%s" % options.postgres
        application.db = momoko.Pool(dsn, size=1, max_size=100)
        ioloop.run_sync(application.db.connect)
    else:
        # Mongo handlers read the module-global ``db`` assigned here.
        db = motor.MotorClient(options.mongo).hello_world
    ioloop.start()
| bsd-3-clause |
jbradberry/dxr | dxr/cli/__init__.py | 7 | 1146 | """Command-line interface for DXR"""
from os.path import basename
from sys import argv
from click import ClickException, group
from dxr.cli.clean import clean
from dxr.cli.delete import delete
from dxr.cli.deploy import deploy
from dxr.cli.index import index
from dxr.cli.list import list
from dxr.cli.serve import serve
from dxr.cli.shell import shell
def main():
    """Invoke Click's top level without swallowing the tracebacks produced by
    control-C.
    The swallowing makes it difficult to debug hangs.
    Returns the invoked command's result, or the ClickException's exit
    code after printing its message.
    """
    try:
        # We can't call BaseCommand.main(), because it re-raises
        # KeyboardInterrupts as Aborts, obscuring the original source of the
        # exception.
        with dxr.make_context(basename(argv[0]), argv[1:]) as ctx:
            return dxr.invoke(ctx)
    except ClickException as exc:
        exc.show()
        return exc.exit_code
@group()
def dxr():
    """Pass dxr COMMAND --help to learn more about an individual command."""
# Register every subcommand on the top-level Click group.
dxr.add_command(clean)
dxr.add_command(delete)
dxr.add_command(deploy)
dxr.add_command(index)
dxr.add_command(list)
dxr.add_command(serve)
dxr.add_command(shell)
| mit |
shakamunyi/nova | nova/tests/functional/v3/api_sample_base.py | 6 | 3041 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo.config import cfg
from nova.api.openstack import API_V3_CORE_EXTENSIONS # noqa
from nova import test
from nova.tests.functional import api_samples_test_base
from nova.tests.unit import fake_network
from nova.tests.unit import fake_utils
CONF = cfg.CONF
class ApiSampleTestBaseV3(api_samples_test_base.ApiSampleTestBase):
    """Base class for the v3 API sample tests.

    Subclasses point ``extension_name``/``sample_dir`` (and optionally
    ``extra_extensions_to_load``) at the extension under test; the helpers
    below resolve the matching sample and template file paths.
    """
    _api_version = 'v3'
    # Subdirectory (under api_samples) holding a subclass's sample files.
    sample_dir = None
    # Additional extension aliases to whitelist besides extension_name.
    extra_extensions_to_load = None
    def setUp(self):
        self.flags(use_ipv6=False,
                   osapi_compute_link_prefix=self._get_host(),
                   osapi_glance_link_prefix=self._get_glance_host())
        if not self.all_extensions:
            # Set the whitelist to ensure only the extensions we are
            # interested in are loaded so the api samples don't include
            # data from extensions we are not interested in
            whitelist = API_V3_CORE_EXTENSIONS.copy()
            if self.extension_name:
                whitelist.add(self.extension_name)
            if self.extra_extensions_to_load:
                whitelist.update(set(self.extra_extensions_to_load))
            CONF.set_override('extensions_whitelist', whitelist,
                              'osapi_v3')
        super(ApiSampleTestBaseV3, self).setUp()
        self.useFixture(test.SampleNetworks(host=self.network.host))
        fake_network.stub_compute_with_ips(self.stubs)
        fake_utils.stub_out_utils_spawn_n(self.stubs)
        # Regenerate stored samples when GENERATE_SAMPLES is set in the env.
        self.generate_samples = os.getenv('GENERATE_SAMPLES') is not None
    @classmethod
    def _get_sample_path(cls, name, dirname, suffix=''):
        """Build the path to a sample/template file for this test class."""
        parts = [dirname]
        parts.append('api_samples')
        if cls.all_extensions:
            parts.append('all_extensions')
        elif cls.sample_dir:
            parts.append(cls.sample_dir)
        elif cls.extension_name:
            parts.append(cls.extension_name)
        parts.append(name + "." + cls.ctype + suffix)
        return os.path.join(*parts)
    @classmethod
    def _get_sample(cls, name):
        # Expected-sample files live under doc/v3 in the source tree.
        dirname = os.path.dirname(os.path.abspath(__file__))
        dirname = os.path.normpath(os.path.join(dirname,
                                                "../../../../doc/v3"))
        return cls._get_sample_path(name, dirname)
    @classmethod
    def _get_template(cls, name):
        # Template (.tpl) files live next to this test module.
        dirname = os.path.dirname(os.path.abspath(__file__))
        return cls._get_sample_path(name, dirname, suffix='.tpl')
| apache-2.0 |
anthonyryan1/xbmc | addons/metadata.demo.movies/demo.py | 21 | 4730 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import xbmcplugin,xbmcgui,xbmc,xbmcaddon
import os,sys,urllib
def get_params():
    """Parse the plugin call's query string (``sys.argv[2]``) into a dict.

    ``sys.argv[2]`` looks like ``?action=find&title=Foo``; the leading
    ``?`` and a single trailing ``/`` are stripped, and each ``key=value``
    pair becomes one dict entry (malformed pairs are ignored).

    Fixes over the previous version:
    - always returns a dict ({} when no parameters were passed); the old
      code returned a list in that case, crashing callers that index by
      key;
    - the trailing-slash strip is now actually applied (the old stripped
      value was computed after ``cleanedparams`` and discarded) and no
      longer chops an extra character (old slice was ``[0:len-2]``).
    """
    param = {}
    paramstring = sys.argv[2]
    if len(paramstring) >= 2:
        cleanedparams = paramstring.replace('?', '')
        if cleanedparams.endswith('/'):
            cleanedparams = cleanedparams[:-1]
        for pair in cleanedparams.split('&'):
            splitparams = pair.split('=')
            if len(splitparams) == 2:
                param[splitparams[0]] = splitparams[1]
    return param
# Dispatch on the scraper action requested by Kodi via the query string.
# NOTE(review): this file mixes the Python 2 print statement (below, in the
# 'nfourl' branch) with print() calls; it targets Python 2 (urllib.unquote_plus).
params=get_params()
action=urllib.unquote_plus(params["action"])
if action == 'find':
    # Search: return candidate matches with a relevance score each.
    year = 0
    title=urllib.unquote_plus(params["title"])
    try:
        year=int(urllib.unquote_plus(params["year"]))
    except:
        # 'year' is optional; keep the 0 default when absent/unparsable.
        pass
    print('Find movie with title %s from year %i' %(title, int(year)))
    liz=xbmcgui.ListItem('Demo movie 1', thumbnailImage='DefaultVideo.png', offscreen=True)
    liz.setProperty('relevance', '0.5')
    xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url="/path/to/movie", listitem=liz, isFolder=True)
    liz=xbmcgui.ListItem('Demo movie 2', thumbnailImage='DefaultVideo.png', offscreen=True)
    liz.setProperty('relevance', '0.3')
    xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url="/path/to/movie2", listitem=liz, isFolder=True)
elif action == 'getdetails':
    # Full metadata for one previously returned result URL.
    url=urllib.unquote_plus(params["url"])
    if url == '/path/to/movie':
        liz=xbmcgui.ListItem('Demo movie 1', offscreen=True)
        liz.setInfo('video',
                    {'title': 'Demo movie 1',
                     'originaltitle': 'Demo måvie 1',
                     'sorttitle': '2',
                     'userrating': 5,
                     'top250': 3,
                     'plotoutline': 'Outline yo',
                     'plot': 'Plot yo',
                     'tagline': 'Tag yo',
                     'duration': 110,
                     'mpaa': 'T',
                     'trailer': '/home/akva/bunnies/unicorns.mkv',
                     'genre': ['Action', 'Comedy'],
                     'country': ['Norway', 'Sweden', 'China'],
                     'credits': ['None', 'Want', 'To Admit It'],
                     'director': ['spiff', 'spiff2'],
                     'set': 'Spiffy creations',
                     'setoverview': 'Horrors created by spiff',
                     'studio': ['Studio1', 'Studio2'],
                     'dateadded': '2016-01-01',
                     'premiered': '2015-01-01',
                     'showlink': ['Demo show 1']
                    })
        #todo: missing actor thumb aspect
        liz.setRating("imdb", 9, 100000, True )
        liz.setRating("themoviedb", 8.9, 1000)
        liz.setUniqueIDs({ 'imdb': 'tt8938399', 'tmdb' : '9837493' }, 'imdb')
        liz.setCast([{'name': 'spiff', 'role': 'himself', 'thumbnail': '/home/akva/Pictures/fish.jpg', 'order': 2},
                     {'name': 'monkey', 'role': 'orange', 'thumbnail': '/home/akva/Pictures/coffee.jpg', 'order': 1}])
        liz.addAvailableArtwork('DefaultBackFanart.png', 'banner')
        liz.addAvailableArtwork('/home/akva/Pictures/hawaii-shirt.png', 'poster')
        liz.setAvailableFanart([{'image': 'DefaultBackFanart.png', 'preview': 'DefaultBackFanart.png'},
                                {'image': '/home/akva/Pictures/hawaii-shirt.png', 'preview': '/home/akva/Pictures/hawaii-shirt.png'}])
        xbmcplugin.setResolvedUrl(handle=int(sys.argv[1]), succeeded=True, listitem=liz)
elif action == 'getartwork':
    # Artwork lookup by unique id.
    url=urllib.unquote_plus(params["id"])
    if url == '456':
        liz=xbmcgui.ListItem('Demo movie 1', offscreen=True)
        liz.addAvailableArtwork('DefaultBackFanart.png', 'banner')
        liz.addAvailableArtwork('/home/akva/Pictures/hawaii-shirt.png', 'poster')
        liz.setAvailableFanart([{'image': 'DefaultBackFanart.png', 'preview': 'DefaultBackFanart.png'},
                                {'image': '/home/akva/Pictures/hawaii-shirt.png', 'preview': '/home/akva/Pictures/hawaii-shirt.png'}])
        xbmcplugin.setResolvedUrl(handle=int(sys.argv[1]), succeeded=True, listitem=liz)
elif action == 'nfourl':
    # Resolve a scraper URL from the content of a local .nfo file.
    nfo=urllib.unquote_plus(params["nfo"])
    print 'Find url from nfo file'
    liz=xbmcgui.ListItem('Demo movie 1', offscreen=True)
    xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url="/path/to/movie1", listitem=liz, isFolder=True)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
| gpl-2.0 |
sameetb-cuelogic/edx-platform-test | common/lib/xmodule/xmodule/graders.py | 27 | 17315 | import abc
import inspect
import logging
import random
import sys
from collections import namedtuple
log = logging.getLogger("edx.courseware")
# Lightweight record for a single result, used both for problems and for
# whole sections; ``section`` carries the problem/section name and
# ``graded`` marks whether the points count toward the final grade.
Score = namedtuple("Score", "earned possible graded section")
def aggregate_scores(scores, section_name="summary"):
    """
    Collapse a list of Score tuples into two totals.

    scores: a list of Score namedtuples
    section_name: label stored on both returned Score tuples

    Returns (all_total, graded_total):
    all_total -- Score summing earned/possible over every input score
    graded_total -- Score summing only the scores flagged as graded
    """
    earned_all = 0
    possible_all = 0
    earned_graded = 0
    possible_graded = 0
    for score in scores:
        earned_all += score.earned
        possible_all += score.possible
        if score.graded:
            earned_graded += score.earned
            possible_graded += score.possible
    all_total = Score(earned_all, possible_all, False, section_name)
    graded_total = Score(earned_graded, possible_graded, True, section_name)
    return all_total, graded_total
def invalid_args(func, argdict):
    """
    Given a function and a dictionary of arguments, returns a set of arguments
    from argdict that aren't accepted by func
    """
    # NOTE(review): inspect.getargspec was removed in Python 3.11; this
    # module targets Python 2 (see the three-expression raise below), where
    # it is still available.
    args, _, keywords, _ = inspect.getargspec(func)
    # A **kwargs parameter means func accepts any keyword argument.
    if keywords:
        return set() # All accepted
    return set(argdict) - set(args)
def grader_from_conf(conf):
    """
    This creates a CourseGrader from a configuration (such as in course_settings.py).
    The conf can simply be an instance of CourseGrader, in which case no work is done.
    More commonly, the conf is a list of dictionaries. A WeightedSubsectionsGrader
    with AssignmentFormatGrader's or SingleSectionGrader's as subsections will be
    generated. Every dictionary should contain the parameters for making either a
    AssignmentFormatGrader or SingleSectionGrader, in addition to a 'weight' key.

    Raises ValueError when a subsection config names no usable grader class
    or its arguments are rejected by the chosen class's constructor.
    """
    if isinstance(conf, CourseGrader):
        return conf
    subgraders = []
    for subgraderconf in conf:
        # Copy so popping 'weight' (and bad args) never mutates the caller's
        # configuration dictionaries.
        subgraderconf = subgraderconf.copy()
        weight = subgraderconf.pop("weight", 0)
        # NOTE: 'name' used to exist in SingleSectionGrader. We are deprecating SingleSectionGrader
        # and converting everything into an AssignmentFormatGrader by adding 'min_count' and
        # 'drop_count'. AssignmentFormatGrader does not expect 'name', so if it appears
        # in bad_args, go ahead remove it (this causes no errors). Eventually, SingleSectionGrader
        # should be completely removed.
        name = 'name'
        try:
            if 'min_count' in subgraderconf:
                #This is an AssignmentFormatGrader
                subgrader_class = AssignmentFormatGrader
            elif name in subgraderconf:
                #This is an SingleSectionGrader
                subgrader_class = SingleSectionGrader
            else:
                raise ValueError("Configuration has no appropriate grader class.")
            bad_args = invalid_args(subgrader_class.__init__, subgraderconf)
            # See note above concerning 'name'.
            if bad_args.issuperset({name}):
                bad_args = bad_args - {name}
                del subgraderconf[name]
            if len(bad_args) > 0:
                log.warning("Invalid arguments for a subgrader: %s", bad_args)
                for key in bad_args:
                    del subgraderconf[key]
            subgrader = subgrader_class(**subgraderconf)
            subgraders.append((subgrader, subgrader.category, weight))
        except (TypeError, ValueError) as error:
            # Add info and re-raise
            msg = ("Unable to parse grader configuration:\n " +
                   str(subgraderconf) +
                   "\n Error was:\n " + str(error))
            # Python 2 three-expression raise: keeps the original traceback.
            raise ValueError(msg), None, sys.exc_info()[2]
    return WeightedSubsectionsGrader(subgraders)
class CourseGrader(object):
    """
    A course grader takes the totaled scores for each graded section (that a student has
    started) in the course. From these scores, the grader calculates an overall percentage
    grade. The grader should also generate information about how that score was calculated,
    to be displayed in graphs or charts.
    A grader has one required method, grade(), which is passed a grade_sheet. The grade_sheet
    contains scores for all graded section that the student has started. If a student has
    a score of 0 for that section, it may be missing from the grade_sheet. The grade_sheet
    is keyed by section format. Each value is a list of Score namedtuples for each section
    that has the matching section format.
    The grader outputs a dictionary with the following keys:
    - percent: Contains a float value, which is the final percentage score for the student.
    - section_breakdown: This is a list of dictionaries which provide details on sections
    that were graded. These are used for display in a graph or chart. The format for a
    section_breakdown dictionary is explained below.
    - grade_breakdown: This is a list of dictionaries which provide details on the contributions
    of the final percentage grade. This is a higher level breakdown, for when the grade is constructed
    of a few very large sections (such as Homeworks, Labs, a Midterm, and a Final). The format for
    a grade_breakdown is explained below. This section is optional.
    A dictionary in the section_breakdown list has the following keys:
    percent: A float percentage for the section.
    label: A short string identifying the section. Preferably fixed-length. E.g. "HW 3".
    detail: A string explanation of the score. E.g. "Homework 1 - Ohms Law - 83% (5/6)"
    category: A string identifying the category. Items with the same category are grouped together
    in the display (for example, by color).
    prominent: A boolean value indicating that this section should be displayed as more prominent
    than other items.
    A dictionary in the grade_breakdown list has the following keys:
    percent: A float percentage in the breakdown. All percents should add up to the final percentage.
    detail: A string explanation of this breakdown. E.g. "Homework - 10% of a possible 15%"
    category: A string identifying the category. Items with the same category are grouped together
    in the display (for example, by color).
    """
    # Python 2 style ABC declaration (Python 3 would use metaclass=ABCMeta).
    __metaclass__ = abc.ABCMeta
    @abc.abstractmethod
    def grade(self, grade_sheet, generate_random_scores=False):
        '''Given a grade sheet, return a dict containing grading information'''
        raise NotImplementedError
class WeightedSubsectionsGrader(CourseGrader):
    """
    Combines several sub-graders, each contributing ``weight`` of the final
    grade. ``sections`` is a list of (grader, category_name, weight)
    tuples, e.g.
    [(hw_grader, "Homework", 0.15), (final_grader, "Final", 0.40)].
    The section_breakdown lists of all sub-graders are concatenated, and
    one grade_breakdown entry is emitted per sub-grader. Weights are not
    normalized, so totals above 100% (extra credit) are possible.
    """
    def __init__(self, sections):
        self.sections = sections
    def grade(self, grade_sheet, generate_random_scores=False):
        overall = 0.0
        combined_sections = []
        per_category = []
        for subgrader, category, weight in self.sections:
            result = subgrader.grade(grade_sheet, generate_random_scores)
            # Each sub-grader contributes its percent scaled by its weight.
            contribution = result['percent'] * weight
            detail = u"{0} = {1:.2%} of a possible {2:.2%}".format(category, contribution, weight)
            overall += contribution
            combined_sections.extend(result['section_breakdown'])
            per_category.append({'percent': contribution, 'detail': detail, 'category': category})
        return {'percent': overall,
                'section_breakdown': combined_sections,
                'grade_breakdown': per_category}
class SingleSectionGrader(CourseGrader):
    """
    Grades exactly one section, matched by assignment format ``type`` and
    section ``name``. ``short_label`` and ``category`` default to ``name``
    when not supplied.
    """
    def __init__(self, type, name, short_label=None, category=None):
        self.type = type
        self.name = name
        self.short_label = short_label or name
        self.category = category or name
    def grade(self, grade_sheet, generate_random_scores=False):
        # Locate the single matching score, if the student has one recorded.
        match = None
        for score in grade_sheet.get(self.type, []):
            if score.section == self.name:
                match = score
                break
        if match is None and not generate_random_scores:
            # Nothing recorded: report an unknown 0% entry.
            percent = 0.0
            detail = u"{name} - 0% (?/?)".format(name=self.name)
        else:
            if generate_random_scores:
                # Debugging aid: fabricate a plausible score.
                earned = random.randint(2, 15)
                possible = random.randint(earned, 15)
            else:
                earned = match.earned
                possible = match.possible
            percent = earned / float(possible)
            detail = u"{name} - {percent:.0%} ({earned:.3n}/{possible:.3n})".format(
                name=self.name,
                percent=percent,
                earned=float(earned),
                possible=float(possible)
            )
        entry = {'percent': percent, 'label': self.short_label,
                 'detail': detail, 'category': self.category, 'prominent': True}
        return {'percent': percent,
                'section_breakdown': [entry],
                # A single section produces no grade_breakdown.
                }
class AssignmentFormatGrader(CourseGrader):
    """
    Grades all sections matching the format 'type' with an equal weight. A specified
    number of lowest scores can be dropped from the calculation. The minimum number of
    sections in this format must be specified (even if those sections haven't been
    written yet).
    min_count defines how many assignments are expected throughout the course. Placeholder
    scores (of 0) will be inserted if the number of matching sections in the course is < min_count.
    If there number of matching sections in the course is > min_count, min_count will be ignored.
    show_only_average is to suppress the display of each assignment in this grader and instead
    only show the total score of this grader in the breakdown.
    hide_average is to suppress the display of the total score in this grader and instead
    only show each assignment in this grader in the breakdown.
    If there is only a single assignment in this grader, then it acts like a SingleSectionGrader
    and returns only one entry for the grader. Since the assignment and the total are the same,
    the total is returned but is not labeled as an average.
    category should be presentable to the user, but may not appear. When the grade breakdown is
    displayed, scores from the same category will be similar (for example, by color).
    section_type is a string that is the type of a singular section. For example, for Labs it
    would be "Lab". This defaults to be the same as category.
    short_label is similar to section_type, but shorter. For example, for Homework it would be
    "HW".
    starting_index is the first number that will appear. For example, starting_index=3 and
    min_count = 2 would produce the labels "Assignment 3", "Assignment 4"
    """
    def __init__(self, type, min_count, drop_count, category=None, section_type=None, short_label=None,
                 show_only_average=False, hide_average=False, starting_index=1):
        self.type = type
        self.min_count = min_count
        self.drop_count = drop_count
        self.category = category or self.type
        self.section_type = section_type or self.type
        self.short_label = short_label or self.type
        self.show_only_average = show_only_average
        self.starting_index = starting_index
        self.hide_average = hide_average
    def grade(self, grade_sheet, generate_random_scores=False):
        def total_with_drops(breakdown, drop_count):
            '''Average the 'percent' entries of breakdown after dropping the
            drop_count lowest; returns (average, dropped_indices).'''
            #create an array of tuples with (index, mark), sorted by mark['percent'] descending
            sorted_breakdown = sorted(enumerate(breakdown), key=lambda x: -x[1]['percent'])
            # A list of the indices of the dropped scores
            dropped_indices = []
            if drop_count > 0:
                dropped_indices = [x[0] for x in sorted_breakdown[-drop_count:]]
            aggregate_score = 0
            for index, mark in enumerate(breakdown):
                if index not in dropped_indices:
                    aggregate_score += mark['percent']
            if (len(breakdown) - drop_count > 0):
                aggregate_score /= len(breakdown) - drop_count
            return aggregate_score, dropped_indices
        # Build one breakdown entry per assignment, padding up to min_count
        # with 0% "Unreleased" placeholders.
        scores = grade_sheet.get(self.type, [])
        breakdown = []
        for i in range(max(self.min_count, len(scores))):
            if i < len(scores) or generate_random_scores:
                if generate_random_scores:  # for debugging!
                    earned = random.randint(2, 15)
                    possible = random.randint(earned, 15)
                    section_name = "Generated"
                else:
                    earned = scores[i].earned
                    possible = scores[i].possible
                    section_name = scores[i].section
                percentage = earned / float(possible)
                summary_format = u"{section_type} {index} - {name} - {percent:.0%} ({earned:.3n}/{possible:.3n})"
                summary = summary_format.format(
                    index=i + self.starting_index,
                    section_type=self.section_type,
                    name=section_name,
                    percent=percentage,
                    earned=float(earned),
                    possible=float(possible)
                )
            else:
                percentage = 0
                summary = u"{section_type} {index} Unreleased - 0% (?/?)".format(
                    index=i + self.starting_index,
                    section_type=self.section_type
                )
            short_label = u"{short_label} {index:02d}".format(
                index=i + self.starting_index,
                short_label=self.short_label
            )
            breakdown.append({'percent': percentage, 'label': short_label,
                              'detail': summary, 'category': self.category})
        total_percent, dropped_indices = total_with_drops(breakdown, self.drop_count)
        # Annotate the dropped entries so the UI can explain them.
        for dropped_index in dropped_indices:
            breakdown[dropped_index]['mark'] = {'detail': u"The lowest {drop_count} {section_type} scores are dropped."
                                                .format(drop_count=self.drop_count, section_type=self.section_type)}
        if len(breakdown) == 1:
            # if there is only one entry in a section, suppress the existing individual entry and the average,
            # and just display a single entry for the section. That way it acts automatically like a
            # SingleSectionGrader.
            total_detail = u"{section_type} = {percent:.0%}".format(
                percent=total_percent,
                section_type=self.section_type,
            )
            total_label = u"{short_label}".format(short_label=self.short_label)
            breakdown = [{'percent': total_percent, 'label': total_label,
                          'detail': total_detail, 'category': self.category, 'prominent': True}, ]
        else:
            total_detail = u"{section_type} Average = {percent:.0%}".format(
                percent=total_percent,
                section_type=self.section_type
            )
            total_label = u"{short_label} Avg".format(short_label=self.short_label)
            if self.show_only_average:
                breakdown = []
            if not self.hide_average:
                breakdown.append({'percent': total_percent, 'label': total_label,
                                  'detail': total_detail, 'category': self.category, 'prominent': True})
        return {'percent': total_percent,
                'section_breakdown': breakdown,
                #No grade_breakdown here
                }
| agpl-3.0 |
anthonylife/TaobaoCompetition2014 | src/tree_based/makeTestFeature.py | 1 | 9734 | #!/usr/bin/env python
#encoding=utf8
#Copyright [2014] [Wei Zhang]
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
###################################################################
# Date: 2014/3/18 #
# Make feature for decision/regression method #
# Feature lists: 1.Collaborative feture #
# 2.User product interaction feature #
# 3.User feature #
# 4.Product feature #
###################################################################
import sys, csv, json, argparse, random, time
sys.path.append("../")
import numpy as np
from collections import defaultdict
from tool import combineFeature, calSegNum, calMonthLength
settings = json.loads(open("../../SETTINGS.json").read())
TOTAL_MONTH = 3
def genTestPairForBuy(data):
    """Return every (user, product) candidate pair for prediction.

    data: iterable of rows whose first two fields are user id and
    product id (possibly as strings).
    Returns a list of [uid, pid] pairs covering the full cross product of
    the distinct users and products seen in ``data``.
    """
    uids = set()
    pids = set()
    for entry in data:
        uids.add(int(entry[0]))
        pids.add(int(entry[1]))
    # Full cross product: every user paired with every product.
    return [[uid, pid] for uid in uids for pid in pids]
def getUserAction(data):
    """Group raw event rows by user, then by product.

    Each row is (uid, pid, action_type, month, day). Returns
    {uid: defaultdict(list) mapping pid -> [[action_type, month, day], ...]}
    preserving the input order of events.
    """
    user_behavior = {}
    for entry in data:
        per_product = user_behavior.setdefault(entry[0], defaultdict(list))
        per_product[entry[1]].append([entry[2], entry[3], entry[4]])
    return user_behavior
def getProductAction(data):
    """Group raw event rows by product, then by user.

    Each row is (uid, pid, action_type, month, day). Returns
    {pid: defaultdict(list) mapping uid -> [[action_type, month, day], ...]}
    preserving the input order of events.
    """
    product_behavior = {}
    for entry in data:
        per_user = product_behavior.setdefault(entry[1], defaultdict(list))
        per_user[entry[0]].append([entry[2], entry[3], entry[4]])
    return product_behavior
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-r', type=int, action='store',
dest='ratio', help='number of negative to positive')
parser.add_argument('-cf', type=int, action='store',
dest='tCF', help='whether use collaborative feature')
parser.add_argument('-up', type=int, action='store',
dest='tUP', help='whether use user product interaction feature')
parser.add_argument('-u', type=int, action='store',
dest='tU', help='whether use user feature')
parser.add_argument('-p', type=int, action='store',
dest='tP', help='whether use product feature')
if len(sys.argv) != 11:
print 'Command e.g.: python makeFeature.py -r 5 -cf 1(0) -up 1(0) '\
+ '-u 1(0) -p 1(0)'
para = parser.parse_args()
user_factor = {}
for entry in csv.reader(open(settings["MODEL_USER_FILE"])):
uid = int(entry[0])
factor = np.array(map(float, entry[1:]))
user_factor[uid] = factor
product_factor = {}
for entry in csv.reader(open(settings["MODEL_PRODUCT_FILE"])):
pid = int(entry[0])
factor = np.array(map(float, entry[1:]))
product_factor[pid] = factor
data = [entry for entry in csv.reader(open(settings["TRAIN_DATA_FILE"]))]
data = [map(int, entry) for entry in data[1:]]
user_behavior = getUserAction(data)
user_bought = {}
for uid in user_behavior:
user_bought[uid] = [0.0 for i in xrange(4)]
total_num = [0 for i in xrange(TOTAL_MONTH)]
product_set = [set([]) for i in xrange(TOTAL_MONTH)]
for pid in user_behavior[uid]:
for entry in user_behavior[uid][pid]:
if entry[0] == 1:
seg_num = calSegNum(entry[1], entry[2])
product_set[seg_num-1].add(pid)
total_num[seg_num-1]+= 1
for i in xrange(TOTAL_MONTH):
user_bought[uid][0] += len(product_set[i])
user_bought[uid][2] += total_num[i]
user_bought[uid][1] = float(user_bought[uid][0])/TOTAL_MONTH
user_bought[uid][0] = len(product_set[TOTAL_MONTH-1])
user_bought[uid][3] = float(user_bought[uid][2])/TOTAL_MONTH
user_bought[uid][2] = total_num[TOTAL_MONTH-1]
product_behavior = getProductAction(data)
product_bought = {}
for pid in product_behavior:
product_bought[pid] = [0.0 for i in xrange(4)]
total_num = [0 for i in xrange(TOTAL_MONTH)]
user_set = [set([]) for i in xrange(TOTAL_MONTH)]
for uid in product_behavior[pid]:
for entry in product_behavior[pid][uid]:
if entry[0] == 1:
seg_num = calSegNum(entry[1], entry[2])
user_set[seg_num-1].add(uid)
total_num[seg_num-1] += 1
for i in xrange(TOTAL_MONTH):
product_bought[pid][0] += len(user_set[i])
product_bought[pid][2] = total_num[i]
product_bought[pid][1] = float(product_bought[pid][0])/TOTAL_MONTH
product_bought[pid][0] = len(user_set[TOTAL_MONTH-1])
product_bought[pid][3] = float(product_bought[pid][2])/TOTAL_MONTH
product_bought[pid][2] = total_num[TOTAL_MONTH-1]
data = [entry for entry in csv.reader(open(settings["TAR_DATA_FILE"]))]
data = [map(int, entry) for entry in data[1:]]
test_pairs = genTestPairForBuy(data)
user_behavior = getUserAction(data)
writer = csv.writer(open(settings["GBT_TEST_FILE"], "w"), lineterminator="\n")
output_feature = [0 for i in range(59)]
score = 0.0
d_day = 14
d_month = 7
w_day = 8
w_month = 7
m_day = 14
m_month = 6
tmp_cnt = np.array([0 for i in range(16)])
print "Start generating features...."
a = time.clock()
for ii, pair in enumerate(test_pairs):
uid = pair[0]
pid = pair[1]
output_feature[0] = uid
output_feature[1] = pid
if para.tCF == 1:
if pid not in product_factor:
score = 0.0
elif uid not in user_factor:
score = 0.0
else:
score = np.dot(user_factor[uid], product_factor[pid])
output_feature[2] = score
if para.tUP == 1:
for entry in user_behavior[uid][pid]:
action_type = entry[0]
src_month = entry[1]
src_day = entry[2]
if src_month == d_month:
if src_day == d_day:
output_feature[3+action_type*3] = 1
output_feature[3+action_type*3+1] += 1
tmp_cnt[action_type] += 1
output_feature[15+action_type*3] = 1
output_feature[15+action_type*3+1] += 1
tmp_cnt[4+action_type] += 1
elif w_day <= src_day:
output_feature[15+action_type*3] = 1
output_feature[15+action_type*3+1] += 1
tmp_cnt[4+action_type] += 1
output_feature[27+action_type*3] = 1
output_feature[27+action_type*3+1] += 1
tmp_cnt[8+action_type] += 1
elif src_month == m_month and src_day > m_day:
output_feature[27+action_type*3] = 1
output_feature[27+action_type*3+1] += 1
tmp_cnt[8+action_type] += 1
output_feature[39+action_type*3] = 1
output_feature[39+action_type*3+1] += 1
tmp_cnt[12+action_type] += 1
for i in xrange(16):
if tmp_cnt[i] == 0:
output_feature[5+i*3] = 0
else:
output_feature[5+i*3] = float(output_feature[5+i*3])/tmp_cnt[i]
tmp_cnt[i] = 0
if para.tU == 1:
if uid not in user_bought:
output_feature[51] = 0
output_feature[52] = 0
output_feature[53] = 0
output_feature[54] = 0
else:
output_feature[51] = user_bought[uid][0]
output_feature[52] = user_bought[uid][1]
output_feature[53] = user_bought[uid][2]
output_feature[54] = user_bought[uid][3]
if para.tP == 1:
if pid not in product_bought:
output_feature[55] = 0
output_feature[56] = 0
output_feature[57] = 0
output_feature[58] = 0
else:
output_feature[55] = product_bought[pid][0]
output_feature[56] = product_bought[pid][1]
output_feature[57] = product_bought[pid][2]
output_feature[58] = product_bought[pid][3]
writer.writerow(output_feature)
output_feature = np.array([0.0 for i in range(59)])
if ii % 10000 == 0:
print "\r%d, cost time: %.1f seconds" % (ii, time.clock() - a)
a = time.clock()
if __name__ == "__main__":
main()
| gpl-2.0 |
CCPorg/WWW-InternetCoin-Ver-631-Original | contrib/pyminer/pyminer.py | 1257 | 6438 | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file license.txt or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
	# Minimal JSON-RPC 1.1 client for talking to a bitcoind node over a
	# persistent HTTP connection with basic auth.
	OBJID = 1
	def __init__(self, host, port, username, password):
		# Pre-compute the basic-auth header and open the connection
		# (30 second timeout, no strict mode).
		authpair = "%s:%s" % (username, password)
		self.authhdr = "Basic %s" % (base64.b64encode(authpair))
		self.conn = httplib.HTTPConnection(host, port, False, 30)
	def rpc(self, method, params=None):
		# Issue a single JSON-RPC call and return its 'result' field.
		# Returns None on transport/decode problems.
		# NOTE(review): on an RPC-level error this returns the error
		# object itself, which callers cannot distinguish from a
		# successful result — confirm this is intended.
		self.OBJID += 1
		obj = { 'version' : '1.1',
			'method' : method,
			'id' : self.OBJID }
		if params is None:
			obj['params'] = []
		else:
			obj['params'] = params
		self.conn.request('POST', '/', json.dumps(obj),
			{ 'Authorization' : self.authhdr,
			'Content-type' : 'application/json' })
		resp = self.conn.getresponse()
		if resp is None:
			print "JSON-RPC: no response"
			return None
		body = resp.read()
		resp_obj = json.loads(body)
		if resp_obj is None:
			print "JSON-RPC: cannot JSON-decode body"
			return None
		if 'error' in resp_obj and resp_obj['error'] != None:
			return resp_obj['error']
		if 'result' not in resp_obj:
			print "JSON-RPC: no result in object"
			return None
		return resp_obj['result']
	def getblockcount(self):
		# Convenience wrapper for the 'getblockcount' RPC.
		return self.rpc('getblockcount')
	def getwork(self, data=None):
		# Convenience wrapper for 'getwork' (fetch work, or submit when
		# data is given).
		return self.rpc('getwork', data)
def uint32(x):
	# Truncate x to an unsigned 32-bit value (Python 2 long literal).
	return x & 0xffffffffL
def bytereverse(x):
	"""Swap the byte order of a 32-bit integer (endianness flip)."""
	swapped = (x << 24) | ((x << 8) & 0x00ff0000) | ((x >> 8) & 0x0000ff00) | (x >> 24)
	# Mask to 32 bits, exactly as uint32() does.
	return swapped & 0xffffffff
def bufreverse(in_buf):
	# Reverse the byte order inside each 32-bit word of the buffer;
	# word positions themselves are unchanged. Python 2 only: relies on
	# str being a byte string so ''.join works on struct.pack output.
	out_words = []
	for i in range(0, len(in_buf), 4):
		word = struct.unpack('@I', in_buf[i:i+4])[0]
		out_words.append(struct.pack('@I', bytereverse(word)))
	return ''.join(out_words)
def wordreverse(in_buf):
	"""Reverse the order of the 4-byte words in the buffer; bytes inside
	each word keep their original order."""
	words = [in_buf[i:i + 4] for i in range(0, len(in_buf), 4)]
	return ''.join(reversed(words))
class Miner:
	# One mining worker: fetches getwork jobs, scans a nonce range, and
	# submits any proof-of-work solution found.
	def __init__(self, id):
		self.id = id
		self.max_nonce = MAX_NONCE
	def work(self, datastr, targetstr):
		# Scan nonces 0..max_nonce-1 against the given block header.
		# Returns (hashes_done, nonce_bin): nonce_bin is the packed
		# winning nonce, or None when the range is exhausted.
		# decode work data hex string to binary
		static_data = datastr.decode('hex')
		static_data = bufreverse(static_data)
		# the first 76b of 80b do not change
		blk_hdr = static_data[:76]
		# decode 256-bit target value
		targetbin = targetstr.decode('hex')
		targetbin = targetbin[::-1]	# byte-swap and dword-swap
		targetbin_str = targetbin.encode('hex')
		target = long(targetbin_str, 16)
		# pre-hash first 76b of block header
		static_hash = hashlib.sha256()
		static_hash.update(blk_hdr)
		for nonce in xrange(self.max_nonce):
			# encode 32-bit nonce value
			nonce_bin = struct.pack("<I", nonce)
			# hash final 4b, the nonce value
			hash1_o = static_hash.copy()
			hash1_o.update(nonce_bin)
			hash1 = hash1_o.digest()
			# sha256 hash of sha256 hash
			hash_o = hashlib.sha256()
			hash_o.update(hash1)
			hash = hash_o.digest()
			# quick test for winning solution: high 32 bits zero?
			if hash[-4:] != '\0\0\0\0':
				continue
			# convert binary hash to 256-bit Python long
			hash = bufreverse(hash)
			hash = wordreverse(hash)
			hash_str = hash.encode('hex')
			l = long(hash_str, 16)
			# proof-of-work test:  hash < target
			if l < target:
				print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
				return (nonce + 1, nonce_bin)
			else:
				print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
			# return (nonce + 1, nonce_bin)
		# NOTE(review): if max_nonce were 0 the loop never runs and
		# 'nonce' is unbound here — in practice max_nonce stays positive.
		return (nonce + 1, None)
	def submit_work(self, rpc, original_data, nonce_bin):
		# Splice the winning nonce back into the original getwork data
		# (hex chars 152..160) and push it upstream.
		nonce_bin = bufreverse(nonce_bin)
		nonce = nonce_bin.encode('hex')
		solution = original_data[:152] + nonce + original_data[160:256]
		param_arr = [ solution ]
		result = rpc.getwork(param_arr)
		print time.asctime(), "--> Upstream RPC result:", result
	def iterate(self, rpc):
		# One getwork round-trip: fetch work, scan, retune max_nonce so
		# a scan lasts roughly settings['scantime'] seconds, then submit
		# any solution. Sleeps and retries on malformed/missing work.
		work = rpc.getwork()
		if work is None:
			time.sleep(ERR_SLEEP)
			return
		if 'data' not in work or 'target' not in work:
			time.sleep(ERR_SLEEP)
			return
		time_start = time.time()
		(hashes_done, nonce_bin) = self.work(work['data'],
						     work['target'])
		time_end = time.time()
		time_diff = time_end - time_start
		self.max_nonce = long(
			(hashes_done * settings['scantime']) / time_diff)
		if self.max_nonce > 0xfffffffaL:
			self.max_nonce = 0xfffffffaL
		if settings['hashmeter']:
			print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
			      self.id, hashes_done,
			      (hashes_done / 1000.0) / time_diff)
		if nonce_bin is not None:
			self.submit_work(rpc, work['data'], nonce_bin)
	def loop(self):
		# Mine forever against the configured upstream node.
		rpc = BitcoinRPC(settings['host'], settings['port'],
				 settings['rpcuser'], settings['rpcpass'])
		if rpc is None:
			return
		while True:
			self.iterate(rpc)
def miner_thread(id):
	# Process entry point: run one Miner loop forever (never returns).
	miner = Miner(id)
	miner.loop()
if __name__ == '__main__':
	if len(sys.argv) != 2:
		print "Usage: pyminer.py CONFIG-FILE"
		sys.exit(1)
	# Parse the key=value config file.
	f = open(sys.argv[1])
	for line in f:
		# skip comment lines
		m = re.search('^\s*#', line)
		if m:
			continue
		# parse key=value lines
		m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
		if m is None:
			continue
		settings[m.group(1)] = m.group(2)
	f.close()
	# Apply defaults for any settings the config file omitted.
	if 'host' not in settings:
		settings['host'] = '127.0.0.1'
	if 'port' not in settings:
		settings['port'] = 8332
	if 'threads' not in settings:
		settings['threads'] = 1
	if 'hashmeter' not in settings:
		settings['hashmeter'] = 0
	if 'scantime' not in settings:
		settings['scantime'] = 30L
	if 'rpcuser' not in settings or 'rpcpass' not in settings:
		print "Missing username and/or password in cfg file"
		sys.exit(1)
	# All config values arrive as strings; coerce the numeric ones.
	settings['port'] = int(settings['port'])
	settings['threads'] = int(settings['threads'])
	settings['hashmeter'] = int(settings['hashmeter'])
	settings['scantime'] = long(settings['scantime'])
	# Spawn one mining process per configured thread and wait on them.
	thr_list = []
	for thr_id in range(settings['threads']):
		p = Process(target=miner_thread, args=(thr_id,))
		p.start()
		thr_list.append(p)
		time.sleep(1)			# stagger threads
	print settings['threads'], "mining threads started"
	print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
	try:
		for thr_proc in thr_list:
			thr_proc.join()
	except KeyboardInterrupt:
		pass
	print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| mit |
crowdresearch/daemo | crowdsourcing/models.py | 2 | 38402 | import json
import os
import pandas as pd
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.postgres.fields import ArrayField, JSONField
from django.db import models
from django.utils import timezone
from crowdsourcing.utils import get_delimiter, get_worker_cache
class TimeStampable(models.Model):
    # Abstract mixin: automatic created_at / updated_at timestamps.
    created_at = models.DateTimeField(auto_now_add=True, auto_now=False)
    updated_at = models.DateTimeField(auto_now_add=False, auto_now=True)
    class Meta:
        abstract = True
class StripeObject(models.Model):
    # Abstract mixin: links a row to a Stripe object and caches its payload.
    stripe_id = models.CharField(max_length=128, db_index=True)
    stripe_data = JSONField(null=True)
    class Meta:
        abstract = True
class ArchiveQuerySet(models.query.QuerySet):
    # Queryset helpers for soft-deleted rows (used by Archivable below).
    def active(self):
        # Rows that have not been soft-deleted.
        return self.filter(deleted_at__isnull=True)
    def inactive(self):
        # Rows that have been soft-deleted.
        return self.filter(deleted_at__isnull=False)
class Archivable(models.Model):
    # Abstract mixin implementing soft delete: delete() only stamps
    # deleted_at, hard_delete() performs the real SQL DELETE.
    deleted_at = models.DateTimeField(null=True)
    objects = ArchiveQuerySet.as_manager()
    class Meta:
        abstract = True
    def delete(self, using=None, keep_parents=False):
        # Intercept the default delete and archive instead (args ignored).
        self.archive()
    def archive(self):
        # Mark the row deleted without removing it.
        self.deleted_at = timezone.now()
        self.save()
    def hard_delete(self, using=None, keep_parents=False):
        # Bypass the soft delete and really remove the row.
        super(Archivable, self).delete()
class Activable(models.Model):
    # Abstract mixin: simple on/off flag.
    is_active = models.BooleanField(default=True)
    class Meta:
        abstract = True
class Verifiable(models.Model):
    # Abstract mixin: verification flag.
    is_verified = models.BooleanField(default=False)
    class Meta:
        abstract = True
class Revisable(models.Model):
    # Abstract mixin: revision metadata; group_id ties revisions of the
    # same logical entity together.
    revised_at = models.DateTimeField(auto_now_add=True, auto_now=False)
    revision_log = models.CharField(max_length=512, null=True, blank=True)
    group_id = models.IntegerField(null=True, db_index=True)
    class Meta:
        abstract = True
class Region(TimeStampable):
    # Geographic region grouping countries.
    name = models.CharField(max_length=64, error_messages={'required': 'Please specify the region!'})
    code = models.CharField(max_length=16, error_messages={'required': 'Please specify the region code!'})
class Country(TimeStampable):
    name = models.CharField(max_length=64, error_messages={'required': 'Please specify the country!'})
    code = models.CharField(max_length=8, error_messages={'required': 'Please specify the country code!'})
    region = models.ForeignKey(Region, related_name='countries', null=True, blank=True)
    def __unicode__(self):
        # Python 2 display name.
        return u'%s' % (self.name,)
class City(TimeStampable):
    name = models.CharField(max_length=64, error_messages={'required': 'Please specify the city!'})
    state = models.CharField(max_length=64, blank=True)
    state_code = models.CharField(max_length=64, blank=True)
    country = models.ForeignKey(Country, related_name='cities')
    def __unicode__(self):
        return u'%s' % (self.name,)
class Address(TimeStampable):
    street = models.CharField(max_length=128, blank=True, null=True)
    city = models.ForeignKey(City, related_name='addresses', null=True, blank=True)
    postal_code = models.CharField(null=True, blank=True, max_length=32)
    def __unicode__(self):
        # NOTE(review): city is nullable, so this raises AttributeError for
        # addresses without a city — confirm callers guarantee one.
        return u'%s, %s, %s' % (self.street, self.city.name, self.city.country.name)
class Language(TimeStampable):
    name = models.CharField(max_length=64, error_messages={'required': 'Please specify the language!'})
    iso_code = models.CharField(max_length=8)
class Skill(TimeStampable, Archivable, Verifiable):
    # Self-referential skill taxonomy (parent -> sub-skills).
    name = models.CharField(max_length=128, error_messages={'required': "Please enter the skill name!"})
    description = models.CharField(max_length=512, error_messages={'required': "Please enter the skill description!"})
    parent = models.ForeignKey('self', related_name='skills', null=True)
class Role(TimeStampable, Archivable, Activable):
    name = models.CharField(max_length=32, unique=True,
                            error_messages={'required': 'Please specify the role name!',
                                            'unique': 'The role %(value)r already exists. Please provide another name!'
                                            })
class Currency(TimeStampable):
    name = models.CharField(max_length=32)
    iso_code = models.CharField(max_length=8)
class Category(TimeStampable, Archivable):
    # Self-referential project category taxonomy.
    name = models.CharField(max_length=128, error_messages={'required': "Please enter the category name!"})
    parent = models.ForeignKey('self', related_name='categories', null=True)
class UserRegistration(TimeStampable):
    # Pending account-activation token.
    user = models.OneToOneField(User)
    activation_key = models.CharField(max_length=40)
class RegistrationWhitelist(TimeStampable):
    # Email addresses allowed to register (optionally time-gated).
    email = models.EmailField(db_index=True)
    valid_from = models.DateTimeField(null=True)
class UserPasswordReset(TimeStampable):
    # Outstanding password-reset token.
    user = models.OneToOneField(User)
    reset_key = models.CharField(max_length=40)
class UserProfile(TimeStampable, Verifiable):
    # Extended per-user demographic and account data (one row per auth user).
    MALE = 'M'
    FEMALE = 'F'
    OTHER = 'O'
    # NOTE(review): DO_NOT_STATE is never included in GENDER, and OTHER is
    # rebound to 'other' below after GENDER is built — confirm both are
    # intentional.
    DO_NOT_STATE = ('DNS', 'Prefer not to specify')
    GENDER = (
        (MALE, 'Male'),
        (FEMALE, 'Female'),
        (OTHER, 'Other')
    )
    PERSONAL = 'personal'
    PROFESSIONAL = 'professional'
    OTHER = 'other'
    RESEARCH = 'research'
    PURPOSE_OF_USE = (
        (PROFESSIONAL, 'Professional'),
        (PERSONAL, 'personal'),
        (RESEARCH, 'research'),
        (OTHER, 'other')
    )
    ETHNICITY = (
        ('white', 'White'),
        ('hispanic', 'Hispanic'),
        ('black', 'Black'),
        ('islander', 'Native Hawaiian or Other Pacific Islander'),
        ('indian', 'Indian'),
        ('asian', 'Asian'),
        ('native', 'Native American or Alaska Native'),
        ('mixed', 'Mixed Race'),
        ('other', 'Other')
    )
    INCOME = (
        ('less_1k', 'Less than $1,000'),
        ('1k', '$1,000 - $1,999'),
        ('2.5k', '$2,500 - $4,999'),
        ('5k', '$5,000 - $7,499'),
        ('7.5k', '$7,500 - $9,999'),
        ('10k', '$10,000 - $14,999'),
        ('15k', '$15,000 - $24,999'),
        ('25k', '$25,000 - $39,999'),
        ('40k', '$40,000 - $59,999'),
        ('60k', '$60,000 - $74,999'),
        ('75k', '$75,000 - $99,999'),
        ('100k', '$100,000 - $149,999'),
        ('150k', '$150,000 - $199,999'),
        ('200k', '$200,000 - $299,999'),
        ('300k_more', '$300,000 or more')
    )
    EDUCATION = (
        ('some_high', 'Some High School, No Degree'),
        ('high', 'High School Degree or Equivalent'),
        ('some_college', 'Some College, No Degree'),
        ('associates', 'Associates Degree'),
        ('bachelors', 'Bachelors Degree'),
        ('masters', 'Graduate Degree, Masters'),
        ('doctorate', 'Graduate Degree, Doctorate')
    )
    user = models.OneToOneField(User, related_name='profile')
    gender = models.CharField(max_length=1, choices=GENDER, blank=True, null=True)
    purpose_of_use = models.CharField(max_length=64, choices=PURPOSE_OF_USE, blank=True, null=True)
    ethnicity = models.CharField(max_length=8, choices=ETHNICITY, blank=True, null=True)
    job_title = models.CharField(max_length=100, blank=True, null=True)
    address = models.ForeignKey(Address, related_name='+', blank=True, null=True)
    birthday = models.DateTimeField(blank=True, null=True)
    nationality = models.ManyToManyField(Country, through='UserCountry')
    languages = models.ManyToManyField(Language, through='UserLanguage')
    picture = models.BinaryField(null=True)
    last_active = models.DateTimeField(auto_now_add=False, auto_now=False, null=True)
    is_worker = models.BooleanField(default=True)
    is_requester = models.BooleanField(default=False)
    income = models.CharField(max_length=9, choices=INCOME, blank=True, null=True)
    education = models.CharField(max_length=12, choices=EDUCATION, blank=True, null=True)
    unspecified_responses = JSONField(null=True)
    handle = models.CharField(max_length=32, db_index=True, blank=False, unique=True)
class UserCountry(TimeStampable):
    # M2M through-table for UserProfile.nationality.
    country = models.ForeignKey(Country)
    user = models.ForeignKey(UserProfile)
class UserSkill(TimeStampable, Verifiable):
    user = models.ForeignKey(User)
    skill = models.ForeignKey(Skill)
    level = models.IntegerField(default=0)
    class Meta:
        unique_together = ('user', 'skill')
class UserRole(TimeStampable):
    user = models.ForeignKey(User)
    role = models.ForeignKey(Role)
class UserLanguage(TimeStampable):
    # M2M through-table for UserProfile.languages.
    language = models.ForeignKey(Language)
    user = models.ForeignKey(UserProfile)
class UserPreferences(TimeStampable):
    user = models.OneToOneField(User, related_name='preferences')
    language = models.ForeignKey(Language, null=True, blank=True)
    currency = models.ForeignKey(Currency, null=True, blank=True)
    login_alerts = models.SmallIntegerField(default=0)
    auto_accept = models.BooleanField(default=False)
    new_tasks_notifications = models.BooleanField(default=True)
    # NOTE(review): mutable default ({}) is shared between instances;
    # default=dict is the usual fix — confirm before changing migrations.
    aux_attributes = JSONField(default={})
class Template(TimeStampable, Archivable, Revisable):
    # Task-presentation template owned by a requester.
    name = models.CharField(max_length=128, error_messages={'required': "Please enter the template name!"})
    owner = models.ForeignKey(User, related_name='templates')
    source_html = models.TextField(default=None, null=True)
    price = models.FloatField(default=0)
    share_with_others = models.BooleanField(default=False)
class BatchFile(TimeStampable, Archivable):
    # Uploaded data file (CSV by default) whose rows drive task generation.
    name = models.CharField(max_length=256)
    file = models.FileField(upload_to='project_files/')
    format = models.CharField(max_length=8, default='csv')
    number_of_rows = models.IntegerField(default=1, null=True)
    column_headers = ArrayField(models.CharField(max_length=64))
    first_row = JSONField(null=True, blank=True)
    hash_sha512 = models.CharField(max_length=128, null=True, blank=True)
    url = models.URLField(null=True, blank=True)
    def parse_csv(self):
        # Load the uploaded file as a list of row dicts; NaN cells -> None.
        delimiter = get_delimiter(self.file.name)
        df = pd.DataFrame(pd.read_csv(self.file, sep=delimiter, encoding='utf-8'))
        df = df.where((pd.notnull(df)), None)
        return df.to_dict(orient='records')
    def delete(self, *args, **kwargs):
        # Remove the backing file from disk, then delete the row.
        # NOTE(review): os.remove raises if the file is already gone —
        # confirm callers guarantee it exists.
        root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        path = os.path.join(root, self.file.url[1:])
        os.remove(path)
        super(BatchFile, self).delete(*args, **kwargs)
class ProjectQueryset(models.query.QuerySet):
    # Custom queryset for Project, including the Boomerang feed query.
    def active(self):
        # Projects that have not been soft-deleted.
        return self.filter(deleted_at__isnull=True)
    def inactive(self):
        # Projects that have been soft-deleted.
        return self.filter(deleted_at__isnull=False)
    def filter_by_boomerang(self, worker, sort_by='-boomerang'):
        # Raw-SQL feed query: returns the in-progress projects this worker
        # may see, ranked by the Boomerang reputation scheme.
        # A project is included when it still has tasks the worker has not
        # taken and other workers have not exhausted, the worker passes the
        # project's qualification expressions and rating threshold (unless
        # boomerang is disabled or the worker owns the project), and the
        # worker is not blacklisted by the requester.
        # sort_by selects the ORDER BY branch: '-boomerang' (requester
        # rating), '-available_tasks', '-published_at' or '-price'.
        worker_cache = get_worker_cache(worker.id)
        worker_data = json.dumps(worker_cache)
        # noinspection SqlResolve
        query = '''
            WITH projects AS (
                SELECT
                    ratings.project_id,
                    ratings.min_rating new_min_rating,
                    requester_ratings.requester_rating,
                    requester_ratings.raw_rating,
                    p_available.remaining available_tasks
                FROM crowdsourcing_project p
                INNER JOIN (SELECT
                                p.id,
                                count(t.id) remaining
                            FROM crowdsourcing_task t INNER JOIN (SELECT
                                                                      group_id,
                                                                      max(id) id
                                                                  FROM crowdsourcing_task
                                                                  WHERE deleted_at IS NULL
                                                                  GROUP BY group_id) t_max ON t_max.id = t.id
                                INNER JOIN crowdsourcing_project p ON p.id = t.project_id
                                INNER JOIN (
                                               SELECT
                                                   t.group_id,
                                                   sum(t.own) own,
                                                   sum(t.others) others
                                               FROM (
                                                        SELECT
                                                            t.group_id,
                                                            CASE WHEN (tw.worker_id = (%(worker_id)s) AND tw.status <> 6)
                                                              or tw.is_qualified is FALSE
                                                                THEN 1
                                                            ELSE 0 END own,
                                                            CASE WHEN (tw.worker_id IS NOT NULL AND tw.worker_id <> (%(worker_id)s))
                                                                      AND tw.status NOT IN (4, 6, 7)
                                                                THEN 1
                                                            ELSE 0 END others
                                                        FROM crowdsourcing_task t
                                                            LEFT OUTER JOIN crowdsourcing_taskworker tw ON (t.id =
                                                                                                            tw.task_id)
                                                        WHERE t.exclude_at IS NULL AND t.deleted_at IS NULL) t
                                               GROUP BY t.group_id) t_count ON t_count.group_id = t.group_id
                            WHERE t_count.own = 0 AND t_count.others < p.repetition
                            GROUP BY p.id) p_available ON p_available.id = p.id
                INNER JOIN (
                               SELECT
                                   u.id,
                                   u.username,
                                   CASE WHEN e.id IS NOT NULL
                                       THEN TRUE
                                   ELSE FALSE END is_denied
                               FROM auth_user u
                                   LEFT OUTER JOIN crowdsourcing_requesteraccesscontrolgroup g
                                       ON g.requester_id = u.id AND g.type = 2 AND g.is_global = TRUE
                                   LEFT OUTER JOIN crowdsourcing_workeraccesscontrolentry e
                                       ON e.group_id = g.id AND e.worker_id = (%(worker_id)s)) requester
                    ON requester.id=p.owner_id
                LEFT OUTER JOIN (
                                    SELECT
                                        qualification_id,
                                        json_agg(i.expression::JSON) expressions
                                    FROM crowdsourcing_qualificationitem i
                                    where i.scope = 'project'
                                    GROUP BY i.qualification_id
                                ) quals
                    ON quals.qualification_id = p.qualification_id
                INNER JOIN get_min_project_ratings() ratings
                    ON p.id = ratings.project_id
                LEFT OUTER JOIN (
                                    SELECT
                                        requester_id,
                                        requester_rating AS raw_rating,
                                        CASE WHEN requester_rating IS NULL AND requester_avg_rating
                                                                               IS NOT NULL
                                            THEN requester_avg_rating
                                        WHEN requester_rating IS NULL AND requester_avg_rating IS NULL
                                            THEN 1.99
                                        WHEN requester_rating IS NOT NULL AND requester_avg_rating IS NULL
                                            THEN requester_rating
                                        ELSE requester_rating + 0.1 * requester_avg_rating END requester_rating
                                    FROM get_requester_ratings(%(worker_id)s)) requester_ratings
                    ON requester_ratings.requester_id = ratings.owner_id
                INNER JOIN (SELECT
                                requester_id,
                                CASE WHEN worker_rating IS NULL AND worker_avg_rating
                                                                    IS NOT NULL
                                    THEN worker_avg_rating
                                WHEN worker_rating IS NULL AND worker_avg_rating IS NULL
                                    THEN 1.99
                                WHEN worker_rating IS NOT NULL AND worker_avg_rating IS NULL
                                    THEN worker_rating
                                ELSE worker_rating + 0.1 * worker_avg_rating END worker_rating
                            FROM get_worker_ratings(%(worker_id)s)) worker_ratings
                    ON worker_ratings.requester_id = ratings.owner_id
                       AND (worker_ratings.worker_rating >= ratings.min_rating or p.enable_boomerang is FALSE
                            or p.owner_id = %(worker_id)s)
                WHERE coalesce(p.deadline, NOW() + INTERVAL '1 minute') > NOW() AND p.status = 3 AND deleted_at IS NULL
                      AND (requester.is_denied = FALSE OR p.enable_blacklist = FALSE)
                      AND is_worker_qualified(quals.expressions, (%(worker_data)s)::JSON)
                ORDER BY requester_rating DESC, ratings.project_id desc
            )
            select p.id, p.name, p.price, p.owner_id, p.created_at, p.allow_feedback,
                   p.is_prototype, projects.requester_rating, projects.raw_rating, projects.available_tasks,
                   up.handle requester_handle, p.published_at
            FROM crowdsourcing_project p
                INNER JOIN crowdsourcing_userprofile up on up.user_id = p.owner_id
                INNER JOIN projects ON projects.project_id = p.id ORDER BY case when %(sort_by)s='-boomerang'
            then requester_rating when %(sort_by)s='-available_tasks' then available_tasks
            when %(sort_by)s='-published_at' then 12 when %(sort_by)s='-price' then p.price
            end desc nulls last, p.id desc;
            '''
        return self.raw(query, params={
            'worker_id': worker.id,
            'st_in_progress': Project.STATUS_IN_PROGRESS,
            'worker_data': worker_data,
            'sort_by': sort_by
        })
class Project(TimeStampable, Archivable, Revisable):
    # Core entity: a requester's crowdsourcing project and its lifecycle.
    STATUS_DRAFT = 1
    STATUS_PUBLISHED = 2
    STATUS_IN_PROGRESS = 3
    STATUS_COMPLETED = 4
    STATUS_PAUSED = 5
    STATUS_CROWD_REJECTED = 6
    STATUS_ARCHIVED = 7
    STATUS = (
        (STATUS_DRAFT, 'Draft'),
        (STATUS_PUBLISHED, 'Published'),
        (STATUS_IN_PROGRESS, 'In Progress'),
        (STATUS_COMPLETED, 'Completed'),
        (STATUS_PAUSED, 'Paused'),
        (STATUS_CROWD_REJECTED, 'Rejected'),
        (STATUS_ARCHIVED, 'Archived'),
    )
    # Visibility of worker feedback to other workers / others.
    PERMISSION_ORW_WRW = 1
    PERMISSION_OR_WRW = 2
    PERMISSION_OR_WR = 3
    PERMISSION_WR = 4
    PERMISSION = (
        (PERMISSION_ORW_WRW, 'Others:Read+Write::Workers:Read+Write'),
        (PERMISSION_OR_WRW, 'Others:Read::Workers:Read+Write'),
        (PERMISSION_OR_WR, 'Others:Read::Workers:Read'),
        (PERMISSION_WR, 'Others:None::Workers:Read')
    )
    name = models.CharField(max_length=256, default="Untitled Project",
                            error_messages={'required': "Please enter the project name!"})
    description = models.TextField(null=True, max_length=2048, blank=True)
    owner = models.ForeignKey(User, related_name='projects')
    parent = models.ForeignKey('self', related_name='projects', null=True, on_delete=models.SET_NULL)
    template = models.ForeignKey(Template, null=True)
    categories = models.ManyToManyField(Category, through='ProjectCategory')
    keywords = models.TextField(null=True, blank=True)
    status = models.IntegerField(choices=STATUS, default=STATUS_DRAFT)
    qualification = models.ForeignKey('Qualification', null=True)
    price = models.DecimalField(decimal_places=2, max_digits=19, null=True)
    # NOTE(review): mutable dict default shared between instances; prefer a
    # callable default — confirm before touching migrations.
    aux_attributes = JSONField(null=True, default={'sort_results_by': 'worker_id'})
    repetition = models.IntegerField(default=1)
    max_tasks = models.PositiveIntegerField(null=True, default=None)
    is_micro = models.BooleanField(default=True)
    is_prototype = models.BooleanField(default=True)
    is_api_only = models.BooleanField(default=True)
    is_paid = models.BooleanField(default=False)
    is_review = models.BooleanField(default=False)
    # has_review = models.BooleanField(default=False)
    timeout = models.DurationField(null=True, default=settings.DEFAULT_TASK_TIMEOUT)
    deadline = models.DateTimeField(null=True)
    task_time = models.DurationField(null=True)
    has_data_set = models.BooleanField(default=False)
    data_set_location = models.CharField(max_length=256, null=True, blank=True)
    batch_files = models.ManyToManyField(BatchFile, through='ProjectBatchFile')
    # Boomerang rating thresholds and bookkeeping.
    min_rating = models.FloatField(default=3.0)
    previous_min_rating = models.FloatField(default=3.0)
    tasks_in_progress = models.IntegerField(default=0)
    rating_updated_at = models.DateTimeField(auto_now_add=True, auto_now=False)
    allow_feedback = models.BooleanField(default=True)
    feedback_permissions = models.IntegerField(choices=PERMISSION, default=PERMISSION_ORW_WRW)
    enable_blacklist = models.BooleanField(default=True)
    enable_whitelist = models.BooleanField(default=True)
    post_mturk = models.BooleanField(default=False)
    publish_at = models.DateTimeField(null=True)
    published_at = models.DateTimeField(null=True)
    last_opened_at = models.DateTimeField(null=True)
    allow_price_per_task = models.BooleanField(default=False)
    task_price_field = models.CharField(max_length=32, null=True)
    amount_due = models.DecimalField(decimal_places=2, max_digits=8, default=0)
    discussion_link = models.TextField(null=True, blank=True)
    topic_id = models.IntegerField(null=True, default=-1)
    post_id = models.IntegerField(null=True, default=-1)
    enable_boomerang = models.BooleanField(default=True)
    objects = ProjectQueryset.as_manager()
    class Meta:
        index_together = [['deadline', 'status', 'min_rating', 'deleted_at'], ['owner', 'deleted_at', 'created_at']]
class ProjectWorkerToRate(TimeStampable):
    # Worker queued for requester rating within a project/batch.
    project = models.ForeignKey(Project, on_delete=models.CASCADE)
    batch = models.ForeignKey('Batch', on_delete=models.SET_NULL, null=True)
    worker = models.ForeignKey(User)
class ProjectBatchFile(models.Model):
    # M2M through-table for Project.batch_files.
    batch_file = models.ForeignKey(BatchFile, on_delete=models.CASCADE)
    project = models.ForeignKey(Project, on_delete=models.CASCADE)
    class Meta:
        unique_together = ('batch_file', 'project',)
class ProjectCategory(TimeStampable):
    # M2M through-table for Project.categories.
    project = models.ForeignKey(Project)
    category = models.ForeignKey(Category)
    class Meta:
        unique_together = ('category', 'project')
class TemplateItem(TimeStampable, Revisable):
    # One ordered component of a Template (display element or input field).
    ROLE_DISPLAY = 'display'
    ROLE_INPUT = 'input'
    ROLE = (
        (ROLE_DISPLAY, 'Display'),
        (ROLE_INPUT, 'Input'),
    )
    name = models.CharField(max_length=128, default='')
    template = models.ForeignKey(Template, related_name='items', on_delete=models.CASCADE)
    role = models.CharField(max_length=16, choices=ROLE, default=ROLE_DISPLAY)
    type = models.CharField(max_length=16, db_index=True)
    sub_type = models.CharField(max_length=16, null=True)
    position = models.IntegerField(null=True)
    required = models.BooleanField(default=True)
    predecessor = models.ForeignKey('self', null=True, related_name='successors', on_delete=models.SET_NULL,
                                    db_index=True)
    aux_attributes = JSONField()
    class Meta:
        ordering = ['position']
class TemplateItemProperties(TimeStampable):
    # Conditional behavior attached to a template item.
    template_item = models.ForeignKey(TemplateItem, related_name='properties')
    attribute = models.CharField(max_length=128)
    operator = models.CharField(max_length=128)
    value1 = models.CharField(max_length=128)
    value2 = models.CharField(max_length=128)
class CollectiveRejection(TimeStampable, Archivable):
    # A worker's stated reason for collectively rejecting a task.
    REASON_LOW_PAY = 1
    REASON_INAPPROPRIATE = 2
    REASON_OTHER = 3
    REASON = (
        (REASON_LOW_PAY, 'The pay is too low for the amount of work'),
        (REASON_INAPPROPRIATE, 'The content is offensive or inappropriate'),
        (REASON_OTHER, 'Other')
    )
    reason = models.IntegerField(choices=REASON)
    detail = models.CharField(max_length=1024, null=True, blank=True)
class Batch(TimeStampable):
parent = models.ForeignKey('Batch', null=True)
class Task(TimeStampable, Archivable, Revisable):
project = models.ForeignKey(Project, related_name='tasks', on_delete=models.CASCADE)
data = JSONField(null=True)
exclude_at = models.ForeignKey(Project, related_name='excluded_tasks', db_column='exclude_at',
null=True, on_delete=models.SET_NULL)
row_number = models.IntegerField(null=True, db_index=True)
rerun_key = models.CharField(max_length=64, db_index=True, null=True)
batch = models.ForeignKey('Batch', related_name='tasks', null=True, on_delete=models.CASCADE)
hash = models.CharField(max_length=64, db_index=True)
min_rating = models.FloatField(default=3.0)
rating_updated_at = models.DateTimeField(auto_now=False, auto_now_add=False, null=True)
price = models.DecimalField(decimal_places=2, max_digits=19, null=True)
class Meta:
index_together = (('rerun_key', 'hash',),)
class TaskWorker(TimeStampable, Archivable, Revisable):
    """A single worker's assignment to a Task and its review lifecycle."""
    # Assignment lifecycle states, surfaced through the STATUS choices below.
    STATUS_IN_PROGRESS = 1
    STATUS_SUBMITTED = 2
    STATUS_ACCEPTED = 3
    STATUS_REJECTED = 4
    STATUS_RETURNED = 5
    STATUS_SKIPPED = 6
    STATUS_EXPIRED = 7
    STATUS = (
        (STATUS_IN_PROGRESS, 'In Progress'),
        (STATUS_SUBMITTED, 'Submitted'),
        (STATUS_ACCEPTED, 'Accepted'),
        (STATUS_REJECTED, 'Rejected'),
        (STATUS_RETURNED, 'Returned'),
        (STATUS_SKIPPED, 'Skipped'),
        (STATUS_EXPIRED, 'Expired'),
    )
    task = models.ForeignKey(Task, related_name='task_workers', on_delete=models.CASCADE)
    worker = models.ForeignKey(User, related_name='task_workers')
    status = models.IntegerField(choices=STATUS, default=STATUS_IN_PROGRESS, db_index=True)
    # Payment bookkeeping for this assignment.
    is_paid = models.BooleanField(default=False)
    paid_at = models.DateTimeField(auto_now_add=False, auto_now=False, null=True)
    collective_rejection = models.OneToOneField(CollectiveRejection, null=True)
    charge = models.ForeignKey('StripeCharge', null=True)
    # Timestamps for the main lifecycle transitions.
    submitted_at = models.DateTimeField(auto_now_add=False, auto_now=False, null=True, db_index=True)
    started_at = models.DateTimeField(auto_now_add=False, auto_now=False, null=True)
    approved_at = models.DateTimeField(auto_now_add=False, auto_now=False, null=True)
    returned_at = models.DateTimeField(auto_now_add=False, auto_now=False, null=True)
    is_qualified = models.BooleanField(default=True, db_index=True)
    # How many times this worker has (re)attempted the task.
    attempt = models.SmallIntegerField(default=0)
    auto_approved = models.BooleanField(default=False)
    class Meta:
        # A worker can hold at most one assignment per task.
        unique_together = ('task', 'worker')
class TaskWorkerSession(TimeStampable):
    """One timed work session a worker spent on an assignment."""
    started_at = models.DateTimeField(auto_now_add=False, auto_now=False, db_index=True)
    # Null while the session is still open.
    ended_at = models.DateTimeField(auto_now_add=False, auto_now=False, null=True, db_index=True)
    task_worker = models.ForeignKey('TaskWorker', related_name='sessions')
class TaskWorkerResult(TimeStampable, Archivable):
    """A worker's answer for one template item of an assignment."""
    task_worker = models.ForeignKey(TaskWorker, related_name='results', on_delete=models.CASCADE)
    result = JSONField(null=True)
    # Optional uploaded file accompanying the answer.
    attachment = models.ForeignKey('FileResponse', null=True)
    template_item = models.ForeignKey(TemplateItem, related_name='+')
class FileResponse(TimeStampable):
    """A file uploaded by a worker as part of a task response."""
    file = models.FileField(upload_to='responses/%Y/%m/%d/')
    name = models.CharField(max_length=256)
    owner = models.ForeignKey(User)
    # SHA-512 digest of the file contents (hex), if computed.
    hash_sha512 = models.CharField(max_length=128, null=True, blank=True)
class WorkerProjectScore(TimeStampable):
    """A worker's skill rating for a project group.

    mu/sigma defaults (25.0 / 8.333) match the conventional TrueSkill-style
    prior -- presumably a TrueSkill rating; confirm against the scoring code.
    """
    project_group_id = models.IntegerField()
    worker = models.ForeignKey(User, related_name='project_scores')
    mu = models.FloatField(default=25.000)
    sigma = models.FloatField(default=8.333)
class WorkerMatchScore(TimeStampable):
    """Snapshot of a worker's project score at the time of one assignment."""
    worker = models.ForeignKey(TaskWorker, related_name='match_scores')
    project_score = models.ForeignKey(WorkerProjectScore, related_name='match_scores')
    mu = models.FloatField()
    sigma = models.FloatField()
class MatchGroup(TimeStampable):
    """A group of matches belonging to one batch, optionally nested."""
    batch = models.ForeignKey(Batch, related_name='match_group')
    rerun_key = models.CharField(max_length=64, null=True, db_index=True)
    hash = models.CharField(max_length=64, db_index=True)
    # Self-reference allows hierarchical grouping of matches.
    parent = models.ForeignKey('self', related_name='children_groups', null=True)
    class Meta:
        index_together = (('rerun_key', 'hash',),)
class Match(TimeStampable):
    """A pairing of workers for a task within a match group."""
    STATUS_CREATED = 1
    STATUS_COMPLETED = 2
    STATUS = (
        (STATUS_CREATED, 'Created'),
        (STATUS_COMPLETED, 'Completed'),
    )
    status = models.IntegerField(choices=STATUS, default=STATUS_CREATED)
    submitted_at = models.DateTimeField(null=True)
    group = models.ForeignKey(MatchGroup, related_name='matches')
    task = models.ForeignKey(Task, related_name='matches', null=True)
class MatchWorker(TimeStampable):
    """A worker's participation in a match, with before/after ratings."""
    match = models.ForeignKey(Match, related_name='workers')
    task_worker = models.ForeignKey(TaskWorker, related_name='matches')
    mu = models.FloatField(null=True)
    sigma = models.FloatField(null=True)
    # Rating values prior to the match update.
    old_mu = models.FloatField(default=25.0, null=True)
    old_sigma = models.FloatField(default=8.333, null=True)
class ActivityLog(TimeStampable):
    """
    Track all user's activities: Create, Update and Delete
    """
    # Free-form description of the action performed.
    activity = models.CharField(max_length=512)
    author = models.ForeignKey(User, related_name='activities')
class Qualification(TimeStampable):
    """A named set of worker-qualification rules owned by a requester."""
    TYPE_STRICT = 1
    TYPE_FLEXIBLE = 2
    name = models.CharField(max_length=64, null=True)
    description = models.CharField(max_length=512, null=True)
    owner = models.ForeignKey(User, related_name='qualifications')
    TYPE = (
        (TYPE_STRICT, "Strict"),
        (TYPE_FLEXIBLE, 'Flexible')
    )
    type = models.IntegerField(choices=TYPE, default=TYPE_STRICT)
class QualificationItem(TimeStampable):
    """One rule (JSON expression) inside a Qualification."""
    qualification = models.ForeignKey(Qualification, related_name='items', on_delete=models.CASCADE)
    # Rule body; schema is defined by the evaluation code, not here.
    expression = JSONField()
    position = models.SmallIntegerField(null=True)
    group = models.SmallIntegerField(default=1)
    scope = models.CharField(max_length=32, default='project', db_index=True)
class Rating(TimeStampable):
    """A rating given by one user to another, optionally tied to a task."""
    RATING_WORKER = 1
    RATING_REQUESTER = 2
    RATING = (
        (RATING_WORKER, "Worker"),
        (RATING_REQUESTER, 'Requester')
    )
    # origin rates target; origin_type records which role the rater holds.
    origin = models.ForeignKey(User, related_name='ratings_to')
    target = models.ForeignKey(User, related_name='ratings_from')
    weight = models.FloatField(default=2)
    origin_type = models.IntegerField(choices=RATING)
    task = models.ForeignKey(Task, null=True)
    class Meta:
        index_together = [
            ['origin', 'target'],
            ['origin', 'target', 'updated_at', 'origin_type']
        ]
class RawRatingFeedback(TimeStampable):
    """Raw per-task feedback a requester recorded about a worker."""
    requester = models.ForeignKey(User, related_name='raw_feedback')
    worker = models.ForeignKey(User, related_name='+')
    weight = models.FloatField(default=0)
    task = models.ForeignKey(Task, null=True)
    # Excluded entries are kept but ignored by downstream aggregation.
    is_excluded = models.BooleanField(default=False)
    class Meta:
        unique_together = ('requester', 'worker', 'task')
        index_together = ('requester', 'worker', 'task', 'is_excluded')
class BoomerangLog(TimeStampable):
    """Audit entry for min_rating changes on a project or task.

    Uses a manual (object_id, object_type) pair rather than a generic FK.
    """
    object_id = models.PositiveIntegerField()
    object_type = models.CharField(max_length=8, default='project')
    min_rating = models.FloatField(default=3.0)
    rating_updated_at = models.DateTimeField(auto_now=False, auto_now_add=False, null=True)
    reason = models.CharField(max_length=64, null=True)
class Conversation(TimeStampable, Archivable):
    """A message thread between a sender and one or more recipients."""
    subject = models.CharField(max_length=64)
    sender = models.ForeignKey(User, related_name='conversations')
    # Per-recipient state lives on the ConversationRecipient through model.
    recipients = models.ManyToManyField(User, through='ConversationRecipient')
class ConversationRecipient(TimeStampable, Archivable):
    """Through model: one recipient's UI state for a conversation."""
    STATUS_OPEN = 1
    STATUS_MINIMIZED = 2
    STATUS_CLOSED = 3
    STATUS_MUTED = 4
    STATUS = (
        (STATUS_OPEN, "Open"),
        (STATUS_MINIMIZED, 'Minimized'),
        (STATUS_CLOSED, 'Closed'),
        (STATUS_MUTED, 'Muted')
    )
    recipient = models.ForeignKey(User)
    conversation = models.ForeignKey(Conversation, on_delete=models.CASCADE)
    status = models.SmallIntegerField(choices=STATUS, default=STATUS_OPEN)
class Message(TimeStampable, Archivable):
    """A single message within a conversation."""
    conversation = models.ForeignKey(Conversation, related_name='messages', on_delete=models.CASCADE)
    sender = models.ForeignKey(User, related_name='messages')
    body = models.TextField(max_length=8192)
    # Delivery/read state lives on the MessageRecipient through model.
    recipients = models.ManyToManyField(User, through='MessageRecipient')
class MessageRecipient(TimeStampable, Archivable):
    """Through model: delivery and read state of a message per recipient."""
    STATUS_SENT = 1
    STATUS_DELIVERED = 2
    STATUS_READ = 3
    STATUS = (
        (STATUS_SENT, 'Sent'),
        (STATUS_DELIVERED, 'Delivered'),
        (STATUS_READ, 'Read')
    )
    message = models.ForeignKey(Message, on_delete=models.CASCADE)
    recipient = models.ForeignKey(User)
    status = models.IntegerField(choices=STATUS, default=STATUS_SENT)
    delivered_at = models.DateTimeField(blank=True, null=True)
    read_at = models.DateTimeField(blank=True, null=True)
class EmailNotification(TimeStampable):
    """Per-user marker for email notifications."""
    # use updated_at to check last notification sent
    recipient = models.OneToOneField(User)
class Comment(TimeStampable, Archivable):
    """A threaded comment; parent links replies to their comment."""
    sender = models.ForeignKey(User, related_name='comments')
    body = models.TextField(max_length=8192)
    parent = models.ForeignKey('self', related_name='comments', null=True)
    class Meta:
        ordering = ['created_at']
class ProjectComment(TimeStampable, Archivable):
    """Attaches a Comment to a Project, with review metadata."""
    project = models.ForeignKey(Project, related_name='comments')
    comment = models.ForeignKey(Comment)
    ready_for_launch = models.NullBooleanField()
    # BUG FIX: default={} handed the SAME mutable dict to every instance,
    # so in-place mutations leaked across rows. Passing the ``dict``
    # callable makes Django build a fresh dict per instance.
    aux_attributes = JSONField(default=dict, null=True)
class TaskComment(TimeStampable, Archivable):
    """Attaches a Comment to a Task."""
    task = models.ForeignKey(Task, related_name='comments')
    comment = models.ForeignKey(Comment)
class FinancialAccount(TimeStampable, Activable):
    """A ledger account (worker earnings, requester deposits, or escrow)."""
    TYPE_WORKER = 1
    TYPE_REQUESTER = 2
    TYPE_ESCROW = 3
    TYPE = (
        (TYPE_WORKER, 'Earnings'),
        (TYPE_REQUESTER, 'Deposits'),
        (TYPE_ESCROW, 'Escrow')
    )
    # Null owner together with is_system=True marks platform-level accounts.
    owner = models.ForeignKey(User, related_name='financial_accounts', null=True)
    type = models.IntegerField(choices=TYPE)
    balance = models.DecimalField(default=0, decimal_places=4, max_digits=19)
    is_system = models.BooleanField(default=False)
class RequesterAccessControlGroup(TimeStampable):
    """An allow- or deny-list of workers maintained by a requester."""
    TYPE_ALLOW = 1
    TYPE_DENY = 2
    TYPE = (
        (TYPE_ALLOW, "allow"),
        (TYPE_DENY, "deny")
    )
    requester = models.ForeignKey(User, related_name="access_groups")
    type = models.SmallIntegerField(choices=TYPE, default=TYPE_ALLOW)
    name = models.CharField(max_length=256, null=True)
    # Global groups apply across all of the requester's projects.
    is_global = models.BooleanField(default=False)
    class Meta:
        index_together = [['requester', 'type', 'is_global']]
class WorkerAccessControlEntry(TimeStampable):
    """Membership of a worker in a requester access-control group."""
    worker = models.ForeignKey(User)
    group = models.ForeignKey(RequesterAccessControlGroup, related_name='entries')
    class Meta:
        unique_together = ('group', 'worker')
        # NOTE(review): this index_together duplicates the index already
        # implied by unique_together above -- likely redundant; confirm
        # against the generated migrations before removing.
        index_together = [['group', 'worker']]
class ReturnFeedback(TimeStampable, Archivable):
    """Requester feedback explaining why an assignment was returned."""
    body = models.TextField(max_length=8192)
    task_worker = models.ForeignKey(TaskWorker, related_name='return_feedback', on_delete=models.CASCADE)
    # Tracks whether/when the worker was notified about this feedback.
    notification_sent = models.BooleanField(default=False, db_index=True)
    notification_sent_at = models.DateTimeField(null=True, auto_now_add=False, auto_now=False)
    class Meta:
        ordering = ['-created_at']
class Error(TimeStampable, Archivable):
    """A recorded application error, optionally tied to the acting user."""
    code = models.CharField(max_length=16)
    message = models.CharField(max_length=256)
    # Optional stack trace or diagnostic dump.
    trace = models.CharField(max_length=4096, null=True)
    owner = models.ForeignKey(User, null=True, related_name='errors')
class StripeAccount(TimeStampable, Verifiable, StripeObject):
    """A user's connected Stripe account (payouts side)."""
    owner = models.OneToOneField(User, related_name='stripe_account')
class StripeCustomer(TimeStampable, StripeObject):
    """A user's Stripe customer record (charges side)."""
    owner = models.OneToOneField(User, related_name='stripe_customer')
    # Balance as an integer -- presumably smallest currency unit (cents),
    # matching Stripe's convention; confirm against the billing code.
    account_balance = models.IntegerField(default=0)
class StripeCharge(TimeStampable, StripeObject):
    """A Stripe charge against a customer, with remaining balance."""
    customer = models.ForeignKey(StripeCustomer, related_name='charges')
    expired = models.BooleanField(default=False)
    expired_at = models.DateTimeField(auto_now_add=False, auto_now=False, null=True)
    # Remaining spendable balance from this charge.
    balance = models.IntegerField()
    discount_applied = models.BooleanField(default=False)
    # Amount before any discount was applied.
    raw_amount = models.IntegerField()
    discount = models.FloatField(default=1.0)
    class Meta:
        index_together = (('created_at',), ('created_at', 'customer'))
class StripeRefund(TimeStampable, StripeObject):
    """A refund issued against a Stripe charge."""
    charge = models.ForeignKey(StripeCharge, related_name='refunds')
class StripeTransfer(TimeStampable, StripeObject):
    """A Stripe transfer of funds to a user."""
    destination = models.ForeignKey(User, related_name='received_transfers')
class StripeTransferReversal(TimeStampable, StripeObject):
    """A reversal of a previously made Stripe transfer."""
    transfer = models.ForeignKey(StripeTransfer, related_name='reversals')
class ProjectNotificationPreference(TimeStampable):
    """Whether a worker wants notifications for a project group."""
    project_group_id = models.IntegerField()
    worker = models.ForeignKey(User, related_name='notification_preferences')
    notify = models.BooleanField(default=True)
    class Meta:
        # One preference row per (project group, worker) pair.
        unique_together = ('project_group_id', 'worker')
class WorkerProjectNotification(TimeStampable):
    """Record that a worker was notified about a project."""
    project = models.ForeignKey('Project')
    worker = models.ForeignKey(User, related_name='project_notifications')
class WorkerBonus(TimeStampable):
    """A one-off bonus payment from a requester to a worker."""
    worker = models.ForeignKey(User, related_name='bonuses_received')
    requester = models.ForeignKey(User, related_name='bonuses_given')
    reason = models.CharField(max_length=256, null=True, blank=True)
    # BUG FIX: this ForeignKey was previously a bare expression with no
    # field name, so Django's metaclass silently discarded it and no
    # project column was ever created. Bind it to ``project`` (nullable,
    # so the change is backward compatible).
    project = models.ForeignKey(Project, related_name='worker_bonuses', null=True)
    charge = models.ForeignKey('StripeCharge', null=True)
    # Bonus amount as an integer -- presumably smallest currency unit
    # (cents); confirm against the payment code.
    amount = models.IntegerField()
class ProjectPreview(TimeStampable):
    """Record that a user previewed a project."""
    project = models.ForeignKey('Project')
    user = models.ForeignKey(User)
| mit |
signalfx/Diamond | src/collectors/proc/test/testproc.py | 30 | 2218 | #!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from diamond.collector import Collector
from proc import ProcessStatCollector
################################################################################
class TestProcessStatCollector(CollectorTestCase):
    """Unit tests for ProcessStatCollector, which parses /proc/stat."""

    def setUp(self):
        """Create a collector instance with a 1-second interval."""
        config = get_collector_config('ProcessStatCollector', {
            'interval': 1
        })
        self.collector = ProcessStatCollector(config, None)

    def test_import(self):
        """The collector class must be importable."""
        self.assertTrue(ProcessStatCollector)

    @patch('__builtin__.open')
    @patch('os.access', Mock(return_value=True))
    @patch.object(Collector, 'publish')
    def test_should_open_proc_stat(self, publish_mock, open_mock):
        """collect() must read exactly the /proc/stat path."""
        open_mock.return_value = StringIO('')
        self.collector.collect()
        open_mock.assert_called_once_with('/proc/stat', 'r')

    @patch.object(Collector, 'publish')
    def test_should_work_with_real_data(self, publish_mock):
        """Two collect() passes over fixtures publish the expected deltas."""
        # First pass only primes the internal state; nothing is published.
        ProcessStatCollector.PROC = self.getFixturePath('proc_stat_1')
        self.collector.collect()
        self.assertPublishedMany(publish_mock, {})
        ProcessStatCollector.PROC = self.getFixturePath('proc_stat_2')
        self.collector.collect()
        # BUG FIX: the original dict literal listed each of these keys
        # twice; the later duplicates silently overwrote the earlier ones.
        # Only the values that actually survived are kept here, so the
        # assertion is byte-for-byte equivalent to the old behavior.
        metrics = {
            'ctxt': 1791,
            'btime': 1319181102,
            'processes': 2,
            'procs_running': 1,
            'procs_blocked': 0,
        }
        self.setDocExample(collector=self.collector.__class__.__name__,
                           metrics=metrics,
                           defaultpath=self.collector.config['path'])
        self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
| mit |
tundish/rson | rson/__init__.py | 3 | 1525 | '''
RSON -- readable serial object notation
RSON is a superset of JSON with relaxed syntax for human readability.
Simple usage example:
import rson
obj = rson.loads(source)
Additional documentation available at:
http://code.google.com/p/rson/
'''
__version__ = '0.08'
__author__ = 'Patrick Maupin <pmaupin@gmail.com>'
__copyright__ = '''
Copyright (c) 2010-2012, Patrick Maupin. All rights reserved.
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
'''
from rson.base import RSONDecodeError, loads
| mit |
CodeForAfrica/grano | grano/views/reconcile_api.py | 4 | 8048 | import json
import logging
from sqlalchemy import or_
from flask import Blueprint, request, url_for
from grano.lib.serialisation import jsonify
from grano.lib.args import object_or_404, get_limit
from grano.core import db, app_name
from grano.lib.exc import BadRequest
from grano.model import Entity, Project, Property
from grano.model import Schema, Attribute
from grano import authz
from grano.logic.reconcile import find_matches
blueprint = Blueprint('reconcile_api', __name__)
log = logging.getLogger(__name__)
def reconcile_index(project):
    """Return the reconciliation service metadata document for *project*.

    The shape follows the Google Refine Reconciliation Service API:
    view/preview URL templates, suggest endpoints, and the default types
    (one per visible entity schema of the project).
    """
    # Base URL of this API, derived from the status endpoint.
    domain = url_for('base_api.status', _external=True).strip('/')
    # Double braces survive into the response: Refine substitutes {{id}}.
    urlp = domain + '/entities/{{id}}'
    meta = {
        'name': '%s: %s' % (app_name, project.label),
        'identifierSpace': 'http://rdf.freebase.com/ns/type.object.id',
        'schemaSpace': 'http://rdf.freebase.com/ns/type.object.id',
        'view': {'url': urlp},
        'preview': {
            'url': urlp + '?preview=true',
            'width': 600,
            'height': 300
        },
        'suggest': {
            'entity': {
                'service_url': domain,
                'service_path': '/projects/' + project.slug + '/suggest_entity'
            },
            'type': {
                'service_url': domain,
                'service_path': '/projects/' + project.slug + '/suggest_type'
            },
            'property': {
                'service_url': domain,
                'service_path': '/projects/' + project.slug + '/suggest_property'
            }
        },
        'defaultTypes': []
    }
    # Advertise every visible entity schema as a default reconciliation type.
    for schema in project.schemata:
        if schema.hidden or schema.obj != 'entity':
            continue
        data = {
            'id': '/%s/%s' % (project.slug, schema.name),
            'name': schema.label
        }
        meta['defaultTypes'].append(data)
    return jsonify(meta)
def reconcile_op(project, query):
    """Run one reconciliation query against *project* and return matches.

    :param query: a Refine query dict with optional ``query`` (text),
        ``type`` (schema id or list of ids) and ``properties`` keys.
    :return: dict with ``result`` (list of candidates) and ``num``.
    """
    log.info("Reconciling in %s: %r", project.slug, query)
    schemata = []
    if 'type' in query:
        schemata = query.get('type')
        # Refine may send a single type id or a list of them.
        if isinstance(schemata, basestring):
            schemata = [schemata]
        # Type ids look like "/<project>/<schema>"; keep only the schema name.
        schemata = [s.rsplit('/', 1)[-1] for s in schemata]
    properties = []
    if 'properties' in query:
        for p in query.get('properties'):
            properties.append((p.get('pid'), p.get('v')))
    matches = find_matches(project, request.account,
                           query.get('query', ''),
                           schemata=schemata,
                           properties=properties)
    matches = matches.limit(get_limit(default=5))
    results = []
    for match in matches:
        data = {
            'name': match['entity']['name'].value,
            'score': match['score'],
            'type': [{
                'id': '/' + project.slug + '/' + match['entity'].schema.name,
                'name': match['entity'].schema.label
            }],
            'id': match['entity'].id,
            'uri': url_for('entities_api.view', id=match['entity'].id,
                           _external=True),
            'match': False #match['score'] == 100
        }
        results.append(data)
    return {
        'result': results,
        'num': len(results)
    }
@blueprint.route('/api/1/projects/<slug>/reconcile', methods=['GET', 'POST'])
def reconcile(slug):
    """
    Reconciliation API, emulates Google Refine API. See:
    http://code.google.com/p/google-refine/wiki/ReconciliationServiceApi

    Dispatches on the request payload: a single ``query``, a batch of
    ``queries``, or (neither) the service metadata document.
    """
    project = object_or_404(Project.by_slug(slug))
    authz.require(authz.project_read(project))
    # TODO: Add proper support for types and namespacing.
    # Merge query-string and form parameters; form values win.
    data = request.args.copy()
    data.update(request.form.copy())
    if 'query' in data:
        # single
        q = data.get('query')
        # 'query' may be a JSON object or a bare search string.
        if q.startswith('{'):
            try:
                q = json.loads(q)
            except ValueError:
                raise BadRequest()
        else:
            q = data
        return jsonify(reconcile_op(project, q))
    elif 'queries' in data:
        # multiple requests in one query
        qs = data.get('queries')
        try:
            qs = json.loads(qs)
        except ValueError:
            raise BadRequest()
        queries = {}
        for k, q in qs.items():
            queries[k] = reconcile_op(project, q)
        return jsonify(queries)
    else:
        return reconcile_index(project)
@blueprint.route('/api/1/projects/<slug>/suggest_entity', methods=['GET', 'POST'])
def suggest_entity(slug):
    """
    Suggest API, emulates Google Refine API. See:
    https://github.com/OpenRefine/OpenRefine/wiki/Reconciliation-Service-API

    Autocompletes entities of *slug* whose 'name' property contains the
    ``prefix`` request argument (case-insensitive substring match).
    """
    project = object_or_404(Project.by_slug(slug))
    authz.require(authz.project_read(project))
    # Substring (not anchored-prefix) match via ILIKE on the name property.
    prefix = '%%%s%%' % request.args.get('prefix', '')
    log.info("Suggesting entities in %s: %r", project.slug, prefix)
    q = db.session.query(Entity)
    q = q.join(Property)
    q = q.join(Project)
    q = q.filter(Property.name == 'name')
    q = q.filter(Property.active == True) # noqa
    q = q.filter(Property.entity_id == Entity.id)
    q = q.filter(Property.value_string.ilike(prefix))
    q = q.filter(Project.slug == slug)
    if 'type' in request.args:
        # Accept both bare schema names and "/<project>/<schema>" ids.
        schema_name = request.args.get('type')
        if '/' in schema_name:
            _, schema_name = schema_name.rsplit('/', 1)
        q = q.join(Schema)
        q = q.filter(Schema.name == schema_name)
    q = q.distinct()
    q = q.limit(get_limit(default=5))
    matches = []
    for e in q:
        data = {
            'name': e['name'].value,
            'n:type': {
                'id': '/' + project.slug + '/' + e.schema.name,
                'name': e.schema.label
            },
            'uri': url_for('entities_api.view', id=e.id, _external=True),
            'id': e.id
        }
        data['type'] = [data.get('n:type')]
        matches.append(data)
    return jsonify({
        "code": "/api/status/ok",
        "status": "200 OK",
        "prefix": request.args.get('prefix', ''),
        "result": matches
    })
@blueprint.route('/api/1/projects/<slug>/suggest_property', methods=['GET', 'POST'])
def suggest_property(slug):
    """Autocomplete entity attribute names for the Refine suggest API.

    Matches the ``prefix`` argument against attribute labels and names
    of the project's entity schemata (case-insensitive substring).
    """
    project = object_or_404(Project.by_slug(slug))
    authz.require(authz.project_read(project))
    prefix = '%%%s%%' % request.args.get('prefix', '')
    log.info("Suggesting property names in %s: %r", project.slug, prefix)
    q = db.session.query(Attribute)
    q = q.join(Schema)
    q = q.filter(Schema.obj == 'entity')
    q = q.filter(Schema.project == project)
    q = q.filter(or_(Attribute.label.ilike(prefix),
                     Attribute.name.ilike(prefix)))
    q = q.limit(get_limit(default=5))
    matches = []
    for attribute in q:
        matches.append({
            'name': attribute.label,
            'n:type': {
                'id': '/properties/property',
                'name': 'Property'
            },
            'id': attribute.name
        })
    return jsonify({
        "code": "/api/status/ok",
        "status": "200 OK",
        "prefix": request.args.get('prefix', ''),
        "result": matches
    })
@blueprint.route('/api/1/projects/<slug>/suggest_type', methods=['GET', 'POST'])
def suggest_type(slug):
    """Autocomplete visible entity schemata for the Refine suggest API.

    Matches the ``prefix`` argument against schema labels and names
    (case-insensitive substring), excluding hidden schemata.
    """
    project = object_or_404(Project.by_slug(slug))
    authz.require(authz.project_read(project))
    prefix = '%%%s%%' % request.args.get('prefix', '')
    log.info("Suggesting types in %s: %r", project.slug, prefix)
    q = db.session.query(Schema)
    q = q.filter(Schema.obj == 'entity')
    q = q.filter(Schema.hidden == False) # noqa
    q = q.filter(Schema.project == project)
    q = q.filter(or_(Schema.label.ilike(prefix), Schema.name.ilike(prefix)))
    q = q.limit(get_limit(default=5))
    matches = []
    for schema in q:
        matches.append({
            'name': schema.label,
            'id': '/%s/%s' % (slug, schema.name)
        })
    return jsonify({
        "code": "/api/status/ok",
        "status": "200 OK",
        "prefix": request.args.get('prefix', ''),
        "result": matches
    })
| mit |
Danceiny/HackGirlfriend | Spider/WeiboSpider/config/conf.py | 1 | 1661 | # coding:utf-8
import os
import random
from yaml import load
# Load spider.yaml (located next to this module) once at import time;
# all accessor functions below read from the parsed mapping ``cf``.
config_path = os.path.join(os.path.dirname(__file__), 'spider.yaml')
with open(config_path, encoding='utf-8') as f:
    cont = f.read()
cf = load(cont)
def get_db_args():
    """Return the database connection settings from spider.yaml."""
    return cf.get('db')
def get_redis_args():
    """Return the redis connection settings from spider.yaml."""
    return cf.get('redis')
def get_timeout():
    """Return the configured request timeout (``time_out``)."""
    return cf.get('time_out')
def get_crawl_interal():
    """Return a random crawl interval within the configured bounds.

    NOTE: 'interal' is a typo for 'interval', kept because callers use
    this name.
    """
    interal = random.randint(cf.get('min_crawl_interal'), cf.get('max_crawl_interal'))
    return interal
def get_excp_interal():
    """Return the wait interval used after an exception occurs."""
    return cf.get('excp_interal')
def get_max_repost_page():
    """Return the maximum number of repost pages to crawl."""
    return cf.get('max_repost_page')
def get_max_search_page():
    """Return the maximum number of search result pages to crawl."""
    return cf.get('max_search_page')
def get_max_home_page():
    """Return the maximum number of home timeline pages to crawl."""
    return cf.get('max_home_page')
def get_max_comment_page():
    """Return the maximum number of comment pages to crawl."""
    return cf.get('max_comment_page')
def get_max_retries():
    """Return the maximum number of retries for a failed request."""
    return cf.get('max_retries')
def get_broker_or_backend(types):
    """Build a redis connection URL for celery.

    :param types: 1 selects the broker database; any other value selects
        the result-backend database.
    :return: a ``redis://:password@host:port/db`` URL string
    """
    redis_conf = cf.get('redis')
    db_key = 'broker' if types == 1 else 'backend'
    return 'redis://:{}@{}:{}/{}'.format(
        redis_conf.get('password'),
        redis_conf.get('host'),
        redis_conf.get('port'),
        redis_conf.get(db_key),
    )
def get_code_username():
    """Return the yundama (captcha service) account username."""
    return cf.get('yundama_username')
def get_code_password():
    """Return the yundama (captcha service) account password."""
    return cf.get('yundama_passwd')
def get_running_mode():
    """Return the spider running mode from configuration."""
    return cf.get('mode')
def get_share_host_count():
    """Return the configured number of hosts sharing cookies/accounts."""
    return cf.get('share_host_count')
def get_cookie_expire_time():
    """Return the cookie expiry time from configuration."""
    return cf.get('cookie_expire_time')
| apache-2.0 |
sigmunau/nav | python/nav/mibs/etherlike_mib.py | 1 | 1422 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2009 UNINETT AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details. You should have received a copy of the GNU General Public
# License along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""Implements a EtherLike-MIB MibRetriever and associated functionality."""
from __future__ import absolute_import
from twisted.internet import defer
from . import mibretriever
class EtherLikeMib(mibretriever.MibRetriever):
    """MibRetriever for EtherLike-MIB"""
    from nav.smidumps.etherlike_mib import MIB as mib
    # NOTE: uses Twisted's legacy deferredGenerator protocol (waitForDeferred
    # + yield) rather than inlineCallbacks; statement order is significant.
    @defer.deferredGenerator
    def get_duplex(self):
        """Get a mapping of ifindexes->duplex status."""
        dw = defer.waitForDeferred(
            self.retrieve_columns(('dot3StatsDuplexStatus',)))
        yield dw
        duplex = self.translate_result(dw.getResult())
        # Row index tuples contain a single element (the ifIndex); unwrap it.
        result = dict([(index[0], row['dot3StatsDuplexStatus'])
                       for index, row in duplex.items()])
        # deferredGenerator-style return: the final yielded non-Deferred
        # value becomes the Deferred's callback result.
        yield result
| gpl-2.0 |
scalable-networks/ext | gnuradio-3.7.0.1/gr-wxgui/python/wxgui/forms/__init__.py | 12 | 4094 | #
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
The following classes will be available through gnuradio.wxgui.forms:
"""
########################################################################
# External Converters
########################################################################
from converters import \
eval_converter, str_converter, \
float_converter, int_converter
########################################################################
# External Forms
########################################################################
from forms import \
radio_buttons, drop_down, notebook, \
button, toggle_button, single_button, \
check_box, text_box, static_text, \
slider, log_slider, gauge, \
make_bold, DataEvent, EVT_DATA
########################################################################
# Helpful widgets
########################################################################
import wx
class static_box_sizer(wx.StaticBoxSizer):
    """
    A box sizer with label and border.

    Args:
        parent: the parent widget
        sizer: add this widget to sizer if provided (optional)
        proportion: the proportion when added to the sizer (default=0)
        flag: the flag argument when added to the sizer (default=wx.EXPAND)
        label: title label for this widget (optional)
        bold: true to boldify the label
        orient: the sizer orientation wx.VERTICAL or wx.HORIZONTAL (default=wx.VERTICAL)
    """
    def __init__(self, parent, label='', bold=False, sizer=None, orient=wx.VERTICAL, proportion=0, flag=wx.EXPAND):
        # Wrap the children in a labeled StaticBox border.
        box = wx.StaticBox(parent=parent, label=label)
        if bold: make_bold(box)
        wx.StaticBoxSizer.__init__(self, box=box, orient=orient)
        # Optionally attach self to a parent sizer in one step.
        if sizer: sizer.Add(self, proportion, flag)
class incr_decr_buttons(wx.BoxSizer):
    """
    A horizontal box sizer with a increment and a decrement button.

    Args:
        parent: the parent widget
        on_incr: the callback for pressing the + button
        on_decr: the callback for pressing the - button
        label: title label for this widget (optional)
        sizer: add this widget to sizer if provided (optional)
        proportion: the proportion when added to the sizer (default=0)
        flag: the flag argument when added to the sizer (default=wx.EXPAND)
    """
    def __init__(self, parent, on_incr, on_decr, label='', sizer=None, proportion=0, flag=wx.EXPAND):
        wx.BoxSizer.__init__(self, wx.HORIZONTAL)
        buttons_box = wx.BoxSizer(wx.HORIZONTAL)
        # "+" button wired to the increment callback.
        self._incr_button = wx.Button(parent, label='+', style=wx.BU_EXACTFIT)
        self._incr_button.Bind(wx.EVT_BUTTON, on_incr)
        buttons_box.Add(self._incr_button, 0, wx.ALIGN_CENTER_VERTICAL)
        # "-" button wired to the decrement callback.
        self._decr_button = wx.Button(parent, label=' - ', style=wx.BU_EXACTFIT)
        self._decr_button.Bind(wx.EVT_BUTTON, on_decr)
        buttons_box.Add(self._decr_button, 0, wx.ALIGN_CENTER_VERTICAL)
        if label: #add label
            self.Add(wx.StaticText(parent, label='%s: '%label), 1, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT)
            self.Add(buttons_box, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
        else: self.Add(buttons_box, 0, wx.ALIGN_CENTER_VERTICAL)
        if sizer: sizer.Add(self, proportion, flag)
    def Disable(self, disable=True): self.Enable(not disable)
    def Enable(self, enable=True):
        """Enable or disable both buttons together."""
        if enable:
            self._incr_button.Enable()
            self._decr_button.Enable()
        else:
            self._incr_button.Disable()
            self._decr_button.Disable()
google-code-export/photivo | scons-local-2.2.0/SCons/Script/__init__.py | 14 | 14186 | """SCons.Script
This file implements the main() function used by the scons script.
Architecturally, this *is* the scons script, and will likely only be
called from the external "scons" wrapper. Consequently, anything here
should not be, or be considered, part of the build engine. If it's
something that we expect other software to want to use, it should go in
some other module. If it's specific to the "scons" script invocation,
it goes here.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Script/__init__.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import time
start_time = time.time()
import collections
import os
import sys
# Special chicken-and-egg handling of the "--debug=memoizer" flag:
#
# SCons.Memoize contains a metaclass implementation that affects how
# the other classes are instantiated. The Memoizer may add shim methods
# to classes that have methods that cache computed values in order to
# count and report the hits and misses.
#
# If we wait to enable the Memoization until after we've parsed the
# command line options normally, it will be too late, because the Memoizer
# will have already analyzed the classes that it's Memoizing and decided
# to not add the shims. So we use a special-case, up-front check for
# the "--debug=memoizer" flag and enable Memoizer before we import any
# of the other modules that use it.
_args = sys.argv + os.environ.get('SCONSFLAGS', '').split()
if "--debug=memoizer" in _args:
import SCons.Memoize
import SCons.Warnings
try:
SCons.Memoize.EnableMemoization()
except SCons.Warnings.Warning:
# Some warning was thrown. Arrange for it to be displayed
# or not after warnings are configured.
import Main
exc_type, exc_value, tb = sys.exc_info()
Main.delayed_warnings.append((exc_type, exc_value))
del _args
import SCons.Action
import SCons.Builder
import SCons.Environment
import SCons.Node.FS
import SCons.Options
import SCons.Platform
import SCons.Scanner
import SCons.SConf
import SCons.Subst
import SCons.Tool
import SCons.Util
import SCons.Variables
import SCons.Defaults
import Main
main = Main.main
# The following are global class definitions and variables that used to
# live directly in this module back before 0.96.90, when it contained
# a lot of code. Some SConscript files in widely-distributed packages
# (Blender is the specific example) actually reached into SCons.Script
# directly to use some of these. Rather than break those SConscript
# files, we're going to propagate these names into the SCons.Script
# namespace here.
#
# Some of these are commented out because it's *really* unlikely anyone
# used them, but we're going to leave the comment here to try to make
# it obvious what to do if the situation arises.
BuildTask = Main.BuildTask
CleanTask = Main.CleanTask
QuestionTask = Main.QuestionTask
#PrintHelp = Main.PrintHelp
#SConscriptSettableOptions = Main.SConscriptSettableOptions
AddOption = Main.AddOption
GetOption = Main.GetOption
SetOption = Main.SetOption
Progress = Main.Progress
GetBuildFailures = Main.GetBuildFailures
#keep_going_on_error = Main.keep_going_on_error
#print_dtree = Main.print_dtree
#print_explanations = Main.print_explanations
#print_includes = Main.print_includes
#print_objects = Main.print_objects
#print_time = Main.print_time
#print_tree = Main.print_tree
#memory_stats = Main.memory_stats
#ignore_errors = Main.ignore_errors
#sconscript_time = Main.sconscript_time
#command_time = Main.command_time
#exit_status = Main.exit_status
#profiling = Main.profiling
#repositories = Main.repositories
#
import SConscript
_SConscript = SConscript
call_stack = _SConscript.call_stack
#
Action = SCons.Action.Action
AddMethod = SCons.Util.AddMethod
AllowSubstExceptions = SCons.Subst.SetAllowableExceptions
Builder = SCons.Builder.Builder
Configure = _SConscript.Configure
Environment = SCons.Environment.Environment
#OptParser = SCons.SConsOptions.OptParser
FindPathDirs = SCons.Scanner.FindPathDirs
Platform = SCons.Platform.Platform
Return = _SConscript.Return
Scanner = SCons.Scanner.Base
Tool = SCons.Tool.Tool
WhereIs = SCons.Util.WhereIs
#
BoolVariable = SCons.Variables.BoolVariable
EnumVariable = SCons.Variables.EnumVariable
ListVariable = SCons.Variables.ListVariable
PackageVariable = SCons.Variables.PackageVariable
PathVariable = SCons.Variables.PathVariable
# Deprecated names that will go away some day.
BoolOption = SCons.Options.BoolOption
EnumOption = SCons.Options.EnumOption
ListOption = SCons.Options.ListOption
PackageOption = SCons.Options.PackageOption
PathOption = SCons.Options.PathOption
# Action factories.
Chmod = SCons.Defaults.Chmod
Copy = SCons.Defaults.Copy
Delete = SCons.Defaults.Delete
Mkdir = SCons.Defaults.Mkdir
Move = SCons.Defaults.Move
Touch = SCons.Defaults.Touch
# Pre-made, public scanners.
CScanner = SCons.Tool.CScanner
DScanner = SCons.Tool.DScanner
DirScanner = SCons.Defaults.DirScanner
ProgramScanner = SCons.Tool.ProgramScanner
SourceFileScanner = SCons.Tool.SourceFileScanner
# Functions we might still convert to Environment methods.
CScan = SCons.Defaults.CScan
DefaultEnvironment = SCons.Defaults.DefaultEnvironment
# Other variables we provide.
class TargetList(collections.UserList):
    """List of build targets whose Default()/clear hooks can be disabled
    by rebinding them to ``_do_nothing``."""

    def _do_nothing(self, *args, **kw):
        """No-op stand-in used to disable the hooks below."""
        pass

    def _add_Default(self, list):
        """Hook invoked when Default() adds targets."""
        self.data.extend(list)

    def _clear(self):
        """Hook invoked to empty the list in place."""
        self.data[:] = []
ARGUMENTS = {}
ARGLIST = []
BUILD_TARGETS = TargetList()
COMMAND_LINE_TARGETS = []
DEFAULT_TARGETS = []
# BUILD_TARGETS can be modified in the SConscript files. If so, we
# want to treat the modified BUILD_TARGETS list as if they specified
# targets on the command line. To do that, though, we need to know if
# BUILD_TARGETS was modified through "official" APIs or by hand. We do
# this by updating two lists in parallel, the documented BUILD_TARGETS
# list, above, and this internal _build_plus_default targets list which
# should only have "official" API changes. Then Script/Main.py can
# compare these two afterwards to figure out if the user added their
# own targets to BUILD_TARGETS.
_build_plus_default = TargetList()
def _Add_Arguments(alist):
    """Record name=value command-line settings in ARGUMENTS (mapping)
    and ARGLIST (ordered pairs)."""
    for argument in alist:
        name, value = argument.split('=', 1)
        ARGUMENTS[name] = value
        ARGLIST.append((name, value))
def _Add_Targets(tlist):
    """Record explicit command-line targets and disable the Default()
    hooks, since explicit targets take precedence."""
    if not tlist:
        return
    COMMAND_LINE_TARGETS.extend(tlist)
    BUILD_TARGETS.extend(tlist)
    BUILD_TARGETS._add_Default = BUILD_TARGETS._do_nothing
    BUILD_TARGETS._clear = BUILD_TARGETS._do_nothing
    _build_plus_default.extend(tlist)
    _build_plus_default._add_Default = _build_plus_default._do_nothing
    _build_plus_default._clear = _build_plus_default._do_nothing
def _Set_Default_Targets_Has_Been_Called(d, fs):
    # Default() was called at least once, so the accumulated module-level
    # DEFAULT_TARGETS list is authoritative; the arguments are ignored.
    return DEFAULT_TARGETS
def _Set_Default_Targets_Has_Not_Been_Called(d, fs):
    """Default-target query used before Default() has ever been called:
    fall back to building the current directory."""
    if d is not None:
        return d
    return [fs.Dir('.')]

# Dispatch pointer flipped by _Set_Default_Targets() once Default() runs.
_Get_Default_Targets = _Set_Default_Targets_Has_Not_Been_Called
def _Set_Default_Targets(env, tlist):
    """Record *tlist* as the DEFAULT_TARGETS of this build.

    Called on behalf of the Default() SConscript function.  Mutates the
    module-level DEFAULT_TARGETS / BUILD_TARGETS / _build_plus_default
    lists in place and flips _Get_Default_Targets so later queries know
    Default() has been called.
    """
    global DEFAULT_TARGETS
    global _Get_Default_Targets
    _Get_Default_Targets = _Set_Default_Targets_Has_Been_Called
    for t in tlist:
        if t is None:
            # Delete the elements from the list in-place, don't
            # reassign an empty list to DEFAULT_TARGETS, so that the
            # variables will still point to the same object we point to.
            del DEFAULT_TARGETS[:]
            BUILD_TARGETS._clear()
            _build_plus_default._clear()
        elif isinstance(t, SCons.Node.Node):
            # Already a Node: use it directly.
            DEFAULT_TARGETS.append(t)
            BUILD_TARGETS._add_Default([t])
            _build_plus_default._add_Default([t])
        else:
            # Anything else (e.g. a string) is resolved to nodes via the
            # construction environment.
            nodes = env.arg2nodes(t, env.fs.Entry)
            DEFAULT_TARGETS.extend(nodes)
            BUILD_TARGETS._add_Default(nodes)
            _build_plus_default._add_Default(nodes)
#
# Text accumulated by Help() calls; None until the first call.
help_text = None

def HelpFunction(text):
    """Append *text* to the global help text displayed for scons -h.

    The previous implementation set the first chunk through the
    ``SCons.Script.help_text`` module attribute but appended through the
    ``help_text`` global; both name the same binding (this module *is*
    SCons.Script), so use the global consistently and drop the dependency
    on the package attribute being reachable at call time.
    """
    global help_text
    if help_text is None:
        help_text = text
    else:
        help_text = help_text + text
#
# Will be non-zero if we are reading an SConscript file.
sconscript_reading = 0
#
def Variables(files=[], args=ARGUMENTS):
    """Factory for SCons.Variables.Variables, seeded with the parsed
    command-line ARGUMENTS by default."""
    # NOTE(review): the mutable [] default is shared across calls — safe
    # only if SCons.Variables does not mutate it; confirm.
    return SCons.Variables.Variables(files, args)

def Options(files=[], args=ARGUMENTS):
    """Older Options-API counterpart of Variables(), retained for
    backward compatibility."""
    return SCons.Options.Options(files, args)
# The list of global functions to add to the SConscript name space
# that end up calling corresponding methods or Builders in the
# DefaultEnvironment().
GlobalDefaultEnvironmentFunctions = [
# Methods from the SConsEnvironment class, above.
'Default',
'EnsurePythonVersion',
'EnsureSConsVersion',
'Exit',
'Export',
'GetLaunchDir',
'Help',
'Import',
#'SConscript', is handled separately, below.
'SConscriptChdir',
# Methods from the Environment.Base class.
'AddPostAction',
'AddPreAction',
'Alias',
'AlwaysBuild',
'BuildDir',
'CacheDir',
'Clean',
#The Command() method is handled separately, below.
'Decider',
'Depends',
'Dir',
'NoClean',
'NoCache',
'Entry',
'Execute',
'File',
'FindFile',
'FindInstalledFiles',
'FindSourceFiles',
'Flatten',
'GetBuildPath',
'Glob',
'Ignore',
'Install',
'InstallAs',
'Literal',
'Local',
'ParseDepends',
'Precious',
'Repository',
'Requires',
'SConsignFile',
'SideEffect',
'SourceCode',
'SourceSignatures',
'Split',
'Tag',
'TargetSignatures',
'Value',
'VariantDir',
]
GlobalDefaultBuilders = [
# Supported builders.
'CFile',
'CXXFile',
'DVI',
'Jar',
'Java',
'JavaH',
'Library',
'M4',
'MSVSProject',
'Object',
'PCH',
'PDF',
'PostScript',
'Program',
'RES',
'RMIC',
'SharedLibrary',
'SharedObject',
'StaticLibrary',
'StaticObject',
'Tar',
'TypeLibrary',
'Zip',
'Package',
]
for name in GlobalDefaultEnvironmentFunctions + GlobalDefaultBuilders:
exec "%s = _SConscript.DefaultEnvironmentCall(%s)" % (name, repr(name))
del name
# There are a handful of variables that used to live in the
# Script/SConscript.py module that some SConscript files out there were
# accessing directly as SCons.Script.SConscript.*. The problem is that
# "SConscript" in this namespace is no longer a module, it's a global
# function call--or more precisely, an object that implements a global
# function call through the default Environment. Nevertheless, we can
# maintain backwards compatibility for SConscripts that were reaching in
# this way by hanging some attributes off the "SConscript" object here.
SConscript = _SConscript.DefaultEnvironmentCall('SConscript')
# Make SConscript look enough like the module it used to be so
# that pychecker doesn't barf.
SConscript.__name__ = 'SConscript'
SConscript.Arguments = ARGUMENTS
SConscript.ArgList = ARGLIST
SConscript.BuildTargets = BUILD_TARGETS
SConscript.CommandLineTargets = COMMAND_LINE_TARGETS
SConscript.DefaultTargets = DEFAULT_TARGETS
# The global Command() function must be handled differently than the
# global functions for other construction environment methods because
# we want people to be able to use Actions that must expand $TARGET
# and $SOURCE later, when (and if) the Action is invoked to build
# the target(s). We do this with the subst=1 argument, which creates
# a DefaultEnvironmentCall instance that wraps up a normal default
# construction environment that performs variable substitution, not a
# proxy that doesn't.
#
# There's a flaw here, though, because any other $-variables on a command
# line will *also* be expanded, each to a null string, but that should
# only be a problem in the unusual case where someone was passing a '$'
# on a command line and *expected* the $ to get through to the shell
# because they were calling Command() and not env.Command()... This is
# unlikely enough that we're going to leave this as is and cross that
# bridge if someone actually comes to it.
Command = _SConscript.DefaultEnvironmentCall('Command', subst=1)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 |
MartinHjelmare/home-assistant | homeassistant/components/zha/core/channels/lighting.py | 7 | 2132 | """
Lighting channels module for Zigbee Home Automation.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zha/
"""
import logging
from . import ZigbeeChannel
_LOGGER = logging.getLogger(__name__)
class ColorChannel(ZigbeeChannel):
    """Color channel."""

    # Bit flags as reported by the ZCL color_capabilities attribute.
    CAPABILITIES_COLOR_XY = 0x08
    CAPABILITIES_COLOR_TEMP = 0x10
    # ZCL status code returned for an unsupported attribute.
    UNSUPPORTED_ATTRIBUTE = 0x86

    def __init__(self, cluster, device):
        """Initialize ColorChannel."""
        super().__init__(cluster, device)
        # Cached capabilities bitmap; populated by fetch_color_capabilities.
        self._color_capabilities = None

    def get_color_capabilities(self):
        """Return the cached color capabilities bitmap (or None)."""
        return self._color_capabilities

    async def async_configure(self):
        """Configure channel."""
        await self.fetch_color_capabilities(False)
        await super().async_configure()

    async def async_initialize(self, from_cache):
        """Initialize channel."""
        await self.fetch_color_capabilities(True)
        await self.get_attribute_value(
            'color_temperature', from_cache=from_cache)
        await self.get_attribute_value('current_x', from_cache=from_cache)
        await self.get_attribute_value('current_y', from_cache=from_cache)

    async def fetch_color_capabilities(self, from_cache):
        """Get the color configuration."""
        capabilities = await self.get_attribute_value(
            'color_capabilities', from_cache=from_cache)

        if capabilities is None:
            # ZCL Version 4 devices don't support the color_capabilities
            # attribute. In this version XY support is mandatory, but we
            # need to probe to determine if the device supports color
            # temperature.
            capabilities = self.CAPABILITIES_COLOR_XY
            result = await self.get_attribute_value(
                'color_temperature', from_cache=from_cache)

            # Compare by value, not identity: the previous ``is not`` only
            # behaved correctly because CPython caches small ints.
            if result != self.UNSUPPORTED_ATTRIBUTE:
                capabilities |= self.CAPABILITIES_COLOR_TEMP
        self._color_capabilities = capabilities
        await super().async_initialize(from_cache)
| apache-2.0 |
yonglehou/grpc | tools/buildgen/plugins/list_protos.py | 27 | 2527 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Buildgen .proto files list plugin.
This parses the list of targets from the json build file, and creates
a list called "protos" that contains all of the proto file names.
"""
import re
def mako_plugin(dictionary):
    """The exported plugin code for list_protos.

    Collects every unique .proto file referenced by any library or target
    in the build dictionary and exports the sorted, extension-stripped
    names as dictionary['protos'].
    """
    libs = dictionary.get('libs', [])
    targets = dictionary.get('targets', [])
    proto_re = re.compile('(.*)\\.proto')

    protos = set()
    # libs and targets share the same {'src': [...]} layout, so one loop
    # over the concatenation replaces the previously duplicated loops.
    for entity in libs + targets:
        for src in entity.get('src', []):
            m = proto_re.match(src)
            if m:
                protos.add(m.group(1))

    dictionary['protos'] = sorted(protos)
| bsd-3-clause |
ryoqun/mitmproxy | libmproxy/contrib/jsbeautifier/unpackers/packer.py | 38 | 3281 | #
# Unpacker for Dean Edward's p.a.c.k.e.r, a part of javascript beautifier
# by Einar Lielmanis <einar@jsbeautifier.org>
#
# written by Stefano Sanfilippo <a.little.coder@gmail.com>
#
# usage:
#
# if detect(some_string):
# unpacked = unpack(some_string)
#
"""Unpacker for Dean Edward's p.a.c.k.e.r"""
import re
import string
from . import UnpackingError
PRIORITY = 1
def detect(source):
    """Return True if *source* looks like P.A.C.K.E.R. packed code."""
    stripped = source.replace(' ', '')
    return stripped.startswith('eval(function(p,a,c,k,e,r')
def unpack(source):
    """Unpacks P.A.C.K.E.R. packed js code."""
    payload, symtab, radix, count = _filterargs(source)

    if len(symtab) != count:
        raise UnpackingError('Malformed p.a.c.k.e.r. symtab.')

    try:
        unbase = Unbaser(radix)
    except TypeError:
        raise UnpackingError('Unknown p.a.c.k.e.r. encoding.')

    def lookup(match):
        """Look up symbols in the synthetic symtab."""
        word = match.group(0)
        return symtab[unbase(word)] or word

    decoded = re.sub(r'\b\w+\b', lookup, payload)
    return _replacestrings(decoded)
def _filterargs(source):
    """Juice from a source file the four args needed by decoder."""
    argsregex = (r"}\('(.*)', *(\d+), *(\d+), *'(.*)'\."
                 r"split\('\|'\), *(\d+), *(.*)\)\)")
    groups = re.search(argsregex, source, re.DOTALL).groups()

    try:
        payload, radix, count, symbols = (groups[0], int(groups[1]),
                                          int(groups[2]), groups[3])
    except ValueError:
        raise UnpackingError('Corrupted p.a.c.k.e.r. data.')
    return payload, symbols.split('|'), radix, count
def _replacestrings(source):
    """Strip string lookup table (list) and replace values in source."""
    match = re.search(r'var *(_\w+)\=\["(.*?)"\];', source, re.DOTALL)
    if not match:
        # No lookup table present: nothing to inline.
        return source

    varname, strings = match.groups()
    startpoint = len(match.group(0))
    for index, value in enumerate(strings.split('","')):
        source = source.replace('%s[%d]' % (varname, index), '"%s"' % value)
    # Drop the (now useless) table declaration from the head of the source.
    return source[startpoint:]
class Unbaser(object):
    """Functor for a given base. Will efficiently convert
    strings to natural numbers."""
    ALPHABET = {
        62 : '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ',
        95 : (' !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ'
              '[\]^_`abcdefghijklmnopqrstuvwxyz{|}~')
    }

    def __init__(self, base):
        self.base = base

        if 2 <= base <= 36:
            # Bases the int() builtin understands natively.
            self.unbase = lambda string: int(string, base)
            return

        # Larger bases need an explicit digit-value table.
        try:
            digits = self.ALPHABET[base]
        except KeyError:
            raise TypeError('Unsupported base encoding.')
        self.dictionary = {cipher: index for index, cipher in enumerate(digits)}
        self.unbase = self._dictunbaser

    def __call__(self, string):
        return self.unbase(string)

    def _dictunbaser(self, string):
        """Decodes a value to an integer."""
        total = 0
        for power, cipher in enumerate(reversed(string)):
            total += (self.base ** power) * self.dictionary[cipher]
        return total
| mit |
kyleinprogress/kyleinprogress.com | blog/models.py | 1 | 4224 | import markdown2
from django.db import models
from django.conf import settings
from django.utils import timezone
from django.utils.text import slugify
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
# --------------------------------
# Helper Functions
# --------------------------------
def insertImageRefLinks(markdownText, images):
    """Append Markdown reference-link definitions for *images* to the text.

    Each image contributes a line of the form ``[name]: url "caption"`` so
    that ``![name][]`` references inside *markdownText* resolve.  The image
    object's str() is the reference name, ``image.url`` the target and
    ``comment`` the caption.  Returns the combined Markdown string.
    """
    # Build the reference block with join instead of repeated string
    # concatenation (the old loop was quadratic in the number of images).
    image_refs = ''.join(
        '\n[%s]: %s "%s"' % (image, image.image.url, image.comment)
        for image in images
    )
    return "%s\n%s" % (markdownText, image_refs)
# --------------------------------
# Define models
# --------------------------------
class PostQuerySet(models.QuerySet):
    """Chainable queryset helpers for Post."""

    def active(self):
        """Return only posts flagged as active."""
        return self.filter(is_active=True)

    def published(self):
        """Return active posts whose publication date is in the past."""
        return self.active().filter(published_date__lte=timezone.now())
# -- Categories --
class Category(models.Model):
    """A blog post category with its own landing page and accent image."""

    name = models.CharField(max_length=200)
    description = models.TextField()
    # Used in the category's public URL; must stay unique.
    slug = models.SlugField(max_length=40, unique=True)
    accent_image = models.ImageField(upload_to='categories')

    def get_absolute_url(self):
        """Return the public URL of this category's listing page."""
        return "/category/%s/" % (self.slug)

    def __str__(self):
        return self.name

    class Meta:
        verbose_name_plural = 'categories'
# -- Images --
class Image(models.Model):
    """An uploaded image that can be referenced from blog posts."""

    name = models.CharField(max_length=100)
    # Optional caption, used as the title text of Markdown references.
    comment = models.CharField(max_length=255, blank=True, null=True)
    # Width/height are populated automatically from the uploaded file.
    image = models.ImageField(upload_to='%Y/%m/%d', width_field='image_width', height_field='image_height')
    image_width = models.IntegerField()
    image_height = models.IntegerField()
    upload_date = models.DateTimeField(default=timezone.now)

    def __str__(self):
        return self.name

    def image_thumbnail(self):
        """Return an HTML snippet showing a 250px preview (admin list)."""
        return u'<img src="%s" width=250 />' % (self.image.url)
    image_thumbnail.short_description = 'Thumbnail'
    image_thumbnail.allow_tags = True

    class Meta:
        ordering = ["-upload_date"]
        verbose_name_plural = 'images'
# -- Posts --
class Post(models.Model):
    """A blog post, optionally scheduled for future publication."""

    author = models.ForeignKey('auth.User')
    title = models.CharField(max_length=200)
    summary = models.TextField(blank=True, null=True)
    text = models.TextField(
        help_text = (
            "Use the following notation to attach a picture. ![PictureName][] "
            "Make sure the picture name matches a value in the \"Chosen Images\" below."
        )
    )
    created_date = models.DateTimeField(default=timezone.now)
    is_active = models.BooleanField(
        help_text= (
            "Tick to make this entry live (see also the publication date). "
            "Note that administrators (like yourself) are allowed to preview "
            "inactive entries whereas the general public aren't."
        ),
        default=False,
    )
    published_date = models.DateTimeField(
        verbose_name= ("Publication date"),
        help_text= (
            "For an entry to be published, it must be active and its "
            "publication date must be in the past."
        ),
        blank=True,
        null=True
    )
    slug = models.SlugField(max_length=200, unique=True)
    site = models.ForeignKey('sites.Site')
    category = models.ForeignKey(Category)
    header_image = models.ImageField(upload_to='%Y/%m/%d')
    images = models.ManyToManyField(Image, blank=True)

    # Custom manager exposing .active()/.published() filters.
    objects = PostQuerySet.as_manager()

    def is_published(self):
        """
        Return True if the entry is publicly accessible.
        """
        # NOTE(review): assumes published_date is set whenever is_active
        # is True; comparing None against a datetime would raise — confirm.
        return self.is_active and self.published_date <= timezone.now()
    is_published.boolean = True

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        """Return the canonical /archives/YYYY/MM/slug/ URL."""
        # NOTE(review): relies on published_date being non-None.
        return "/archives/%s/%s/%s/" % (self.published_date.strftime("%Y"), self.published_date.strftime("%m"), self.slug)

    def get_post_year(self):
        """Return the four-digit publication year as a string."""
        return self.published_date.strftime("%Y")

    def textWithImageLinks(self):
        """Return the post body with Markdown reference links appended
        for every attached image."""
        return insertImageRefLinks(self.text, self.images.all())

    class Meta:
        ordering = ["-published_date"]
        verbose_name_plural = 'posts'
        get_latest_by = 'published_date'
| gpl-2.0 |
HeatherHillers/RoamMac | src/roam/editorwidgets/featureformwidget.py | 1 | 6120 | import functools
from PyQt4.QtCore import pyqtSignal, QSize, Qt
from PyQt4.QtGui import QToolBar, QWidget, QSizePolicy, QLabel, QIcon
from roam.popupdialogs import DeleteFeatureDialog
from roam.editorwidgets.core import LargeEditorWidget
from roam.flickwidget import FlickCharm
from roam.api import featureform, RoamEvents
from roam.ui.ui_featureformwidget import Ui_Form
class FeatureFormWidget(Ui_Form, QWidget):
    """Hosts a single feature form plus a toolbar providing the
    Delete/Cancel/Save actions and a required-fields hint."""

    # Raise the cancel event, takes a reason and a level
    cancel = pyqtSignal(str, int)
    featuresaved = pyqtSignal()
    featuredeleted = pyqtSignal()

    def __init__(self, parent=None):
        super(FeatureFormWidget, self).__init__(parent)
        self.setupUi(self)

        # Toolbar layout: Delete | missing-fields hint | title | Cancel | Save
        toolbar = QToolBar()
        size = QSize(48, 48)
        toolbar.setIconSize(size)
        style = Qt.ToolButtonTextUnderIcon
        toolbar.setToolButtonStyle(style)
        self.actionDelete = toolbar.addAction(QIcon(":/icons/delete"), "Delete")
        self.actionDelete.triggered.connect(self.delete_feature)

        label = 'Required fields marked in <b style="background-color:rgba(255, 221, 48,150)">yellow</b>'
        self.missingfieldsLabel = QLabel(label)
        self.missingfieldsLabel.hide()
        self.missingfieldaction = toolbar.addWidget(self.missingfieldsLabel)

        titlespacer = QWidget()
        titlespacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        toolbar.addWidget(titlespacer)

        # The title text is replaced with the form's window title in
        # set_featureform(); the initial label content is a placeholder.
        self.titlellabel = QLabel(label)
        self.titlellabel.setProperty("headerlabel", True)
        self.titlelabelaction = toolbar.addWidget(self.titlellabel)

        spacer = QWidget()
        spacer2 = QWidget()
        spacer2.setMinimumWidth(20)
        spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        spacer2.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        toolbar.addWidget(spacer)
        self.actionCancel = toolbar.addAction(QIcon(":/icons/cancel"), "Cancel")
        toolbar.addWidget(spacer2)
        self.actionSave = toolbar.addAction(QIcon(":/icons/save"), "Save")
        self.actionSave.triggered.connect(self.save_feature)

        self.layout().insertWidget(0, toolbar)

        # Kinetic ("flick") scrolling for touch devices.
        self.flickwidget = FlickCharm()
        self.flickwidget.activateOn(self.scrollArea)

        self.featureform = None
        self.values = {}
        self.config = {}
        self.feature = None

    def set_featureform(self, featureform):
        """
        Note: There can only be one feature form. If you need to show another one make a new FeatureFormWidget
        """
        self.featureform = featureform
        self.titlellabel.setText(self.featureform.windowTitle())
        # Wire form signals through to this widget's toolbar and events.
        self.featureform.formvalidation.connect(self._update_validation)
        self.featureform.helprequest.connect(functools.partial(RoamEvents.helprequest.emit, self))
        self.featureform.showlargewidget.connect(RoamEvents.show_widget.emit)
        self.featureform.enablesave.connect(self.actionSave.setEnabled)
        self.featureform.enablesave.connect(self.actionSave.setVisible)
        self.featureform.rejected.connect(self.cancel.emit)
        self.featureformarea.layout().addWidget(self.featureform)

    def delete_feature(self):
        """Ask for confirmation, then delete the current feature."""
        try:
            msg = self.featureform.deletemessage
        except AttributeError:
            msg = 'Do you really want to delete this feature?'

        box = DeleteFeatureDialog(msg)

        if not box.exec_():
            # User backed out of the confirmation dialog.
            return

        try:
            self.featureform.delete()
        except featureform.DeleteFeatureException as ex:
            RoamEvents.raisemessage(*ex.error)

        self.featureform.featuredeleted(self.feature)
        self.featuredeleted.emit()

    def save_feature(self):
        """Validate and save the current feature, emitting featuresaved."""
        try:
            self.featureform.save()
        except featureform.MissingValuesException as ex:
            # Required fields missing: surface the message and abort.
            RoamEvents.raisemessage(*ex.error)
            return
        except featureform.FeatureSaveException as ex:
            RoamEvents.raisemessage(*ex.error)

        self.featuresaved.emit()
        RoamEvents.featuresaved.emit()

    def set_config(self, config):
        """Apply widget config: edit mode, save permission and feature."""
        self.config = config
        editmode = config['editmode']
        allowsave = config.get('allowsave', True)
        self.feature = config.get('feature', None)
        self.featureform.feature = self.feature
        self.featureform.editingmode = editmode
        # Delete only makes sense when editing an existing feature.
        self.actionDelete.setVisible(editmode)
        self.actionSave.setEnabled(allowsave)

    def _update_validation(self, passed):
        # Show the error if there is missing fields
        self.missingfieldaction.setVisible(not passed)

    def bind_values(self, values):
        """Bind the given attribute values into the form controls."""
        self.values = values
        self.featureform.bindvalues(values)

    def after_load(self):
        self.featureform.loaded()

    def before_load(self):
        self.featureform.load(self.config['feature'], self.config['layers'], self.values)
class FeatureFormWidgetEditor(LargeEditorWidget):
    """LargeEditorWidget adapter wrapping a FeatureFormWidget so feature
    forms can be shown through the large-widget stack."""

    def __init__(self, *args, **kwargs):
        super(FeatureFormWidgetEditor, self).__init__(*args, **kwargs)

    def createWidget(self, parent=None):
        """Build the FeatureFormWidget described by the initial config."""
        config = self.initconfig
        form = config['form']
        canvas = config.get('canvas', None)
        formwidget = FeatureFormWidget()
        editmode = config['editmode']
        featureform = form.create_featureform(None, defaults={}, canvas=canvas, editmode=editmode)
        formwidget.set_featureform(featureform)
        return formwidget

    def initWidget(self, widget):
        # Bridge widget signals to the LargeEditorWidget lifecycle.
        widget.actionCancel.triggered.connect(self.cancelform)
        widget.featuresaved.connect(self.emitfished)
        widget.featuredeleted.connect(self.emitfished)

    def cancelform(self):
        self.emitcancel()

    def updatefromconfig(self):
        self.widget.set_config(self.config)

    def before_load(self):
        self.widget.before_load()

    def value(self):
        # Only the current (unsaved) values are reported; the saved-values
        # half of the tuple is intentionally discarded here.
        values, savedvalues = self.widget.featureform.getvalues()
        return values

    def setvalue(self, value):
        self.widget.bind_values(value)

    def after_load(self):
        self.widget.after_load()
| gpl-2.0 |
kholidfu/django | django/utils/autoreload.py | 295 | 10877 | # Autoreloading launcher.
# Borrowed from Peter Hunt and the CherryPy project (http://www.cherrypy.org).
# Some taken from Ian Bicking's Paste (http://pythonpaste.org/).
#
# Portions copyright (c) 2004, CherryPy Team (team@cherrypy.org)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the CherryPy Team nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import signal
import sys
import time
import traceback
from django.apps import apps
from django.conf import settings
from django.core.signals import request_finished
from django.utils import six
from django.utils._os import npath
from django.utils.six.moves import _thread as thread
# This import does nothing, but it's necessary to avoid some race conditions
# in the threading module. See http://code.djangoproject.com/ticket/2330 .
try:
import threading # NOQA
except ImportError:
pass
try:
import termios
except ImportError:
termios = None
USE_INOTIFY = False
try:
# Test whether inotify is enabled and likely to work
import pyinotify
fd = pyinotify.INotifyWrapper.create().inotify_init()
if fd >= 0:
USE_INOTIFY = True
os.close(fd)
except ImportError:
pass
RUN_RELOADER = True
FILE_MODIFIED = 1
I18N_MODIFIED = 2
_mtimes = {}
_win = (sys.platform == "win32")
_exception = None
_error_files = []
_cached_modules = set()
_cached_filenames = []
def gen_filenames(only_new=False):
    """
    Returns a list of filenames referenced in sys.modules and translation
    files.  With only_new=True, only files not seen on previous calls are
    returned (files that previously raised errors are always included).
    """
    # N.B. ``list(...)`` is needed, because this runs in parallel with
    # application code which might be mutating ``sys.modules``, and this will
    # fail with RuntimeError: cannot mutate dictionary while iterating
    global _cached_modules, _cached_filenames
    module_values = set(sys.modules.values())
    # Re-validate the cache: files may have been deleted since last call.
    _cached_filenames = clean_files(_cached_filenames)
    if _cached_modules == module_values:
        # No changes in module list, short-circuit the function
        if only_new:
            return []
        else:
            return _cached_filenames + clean_files(_error_files)

    new_modules = module_values - _cached_modules
    new_filenames = clean_files(
        [filename.__file__ for filename in new_modules
         if hasattr(filename, '__file__')])
    if not _cached_filenames and settings.USE_I18N:
        # Add the names of the .mo files that can be generated
        # by compilemessages management command to the list of files watched.
        basedirs = [os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                 'conf', 'locale'),
                    'locale']
        for app_config in reversed(list(apps.get_app_configs())):
            basedirs.append(os.path.join(npath(app_config.path), 'locale'))
        basedirs.extend(settings.LOCALE_PATHS)
        basedirs = [os.path.abspath(basedir) for basedir in basedirs
                    if os.path.isdir(basedir)]
        for basedir in basedirs:
            for dirpath, dirnames, locale_filenames in os.walk(basedir):
                for filename in locale_filenames:
                    if filename.endswith('.mo'):
                        new_filenames.append(os.path.join(dirpath, filename))

    _cached_modules = _cached_modules.union(new_modules)
    _cached_filenames += new_filenames
    if only_new:
        return new_filenames + clean_files(_error_files)
    else:
        return _cached_filenames + clean_files(_error_files)
def clean_files(filelist):
    """Normalise a list of paths: map compiled artefacts (.pyc/.pyo,
    Jython $py.class) back to their sources and keep only entries that
    still exist on disk.  Falsy entries are dropped."""
    existing = []
    for path in filelist:
        if not path:
            continue
        if path.endswith((".pyc", ".pyo")):
            path = path[:-1]
        if path.endswith("$py.class"):
            path = path[:-9] + ".py"
        if os.path.exists(path):
            existing.append(path)
    return existing
def reset_translations():
    """Flush gettext and Django translation caches so that updated .mo
    files are re-read without restarting the process."""
    import gettext
    from django.utils.translation import trans_real
    # NOTE(review): reaches into private attributes of gettext/trans_real;
    # tied to the internals of this Django version.
    gettext._translations = {}
    trans_real._translations = {}
    trans_real._default = None
    trans_real._active = threading.local()
def inotify_code_changed():
    """
    Checks for changed code using inotify. After being called
    it blocks until a change event has been fired.
    Returns I18N_MODIFIED for .mo files, FILE_MODIFIED otherwise.
    """
    class EventHandler(pyinotify.ProcessEvent):
        # Class attribute used to carry the change type out of the
        # callback; read after the events are processed below.
        modified_code = None

        def process_default(self, event):
            if event.path.endswith('.mo'):
                EventHandler.modified_code = I18N_MODIFIED
            else:
                EventHandler.modified_code = FILE_MODIFIED

    wm = pyinotify.WatchManager()
    notifier = pyinotify.Notifier(wm, EventHandler())

    def update_watch(sender=None, **kwargs):
        if sender and getattr(sender, 'handles_files', False):
            # No need to update watches when request serves files.
            # (sender is supposed to be a django.core.handlers.BaseHandler subclass)
            return
        mask = (
            pyinotify.IN_MODIFY |
            pyinotify.IN_DELETE |
            pyinotify.IN_ATTRIB |
            pyinotify.IN_MOVED_FROM |
            pyinotify.IN_MOVED_TO |
            pyinotify.IN_CREATE |
            pyinotify.IN_DELETE_SELF |
            pyinotify.IN_MOVE_SELF
        )
        for path in gen_filenames(only_new=True):
            wm.add_watch(path, mask)

    # New modules may get imported when a request is processed.
    request_finished.connect(update_watch)

    # Block until an event happens.
    update_watch()
    notifier.check_events(timeout=None)
    notifier.read_events()
    notifier.process_events()
    notifier.stop()

    # If we are here the code must have changed.
    return EventHandler.modified_code
def code_changed():
    """Poll mtimes of every watched file; return FILE_MODIFIED or
    I18N_MODIFIED on the first change found, else False."""
    global _mtimes, _win
    for filename in gen_filenames():
        stat = os.stat(filename)
        mtime = stat.st_mtime
        if _win:
            # NOTE(review): presumably compensates for Windows timestamp
            # behaviour on copied files — confirm before changing.
            mtime -= stat.st_ctime
        if filename not in _mtimes:
            # First sighting: record a baseline, don't treat as a change.
            _mtimes[filename] = mtime
            continue
        if mtime != _mtimes[filename]:
            # Drop all baselines so everything is re-recorded after reload.
            _mtimes = {}
            try:
                # The file changed, so stop treating it as broken.
                del _error_files[_error_files.index(filename)]
            except ValueError:
                pass
            return I18N_MODIFIED if filename.endswith('.mo') else FILE_MODIFIED
    return False
def check_errors(fn):
    """Wrap *fn* so any exception it raises is stashed in the module
    global _exception (re-raised later by raise_last_exception) and the
    offending file is added to _error_files so the reloader keeps
    watching it until it is fixed."""
    # NOTE(review): wrapper discards fn's return value and does not use
    # functools.wraps, so fn's metadata is not preserved.
    def wrapper(*args, **kwargs):
        global _exception
        try:
            fn(*args, **kwargs)
        except Exception:
            _exception = sys.exc_info()

            et, ev, tb = _exception

            if getattr(ev, 'filename', None) is None:
                # get the filename from the last item in the stack
                filename = traceback.extract_tb(tb)[-1][0]
            else:
                filename = ev.filename

            if filename not in _error_files:
                _error_files.append(filename)

            raise

    return wrapper
def raise_last_exception():
    """Re-raise the exception captured by check_errors(), if any."""
    global _exception
    if _exception is not None:
        six.reraise(*_exception)
def ensure_echo_on():
    """Re-enable terminal echo on stdin if a previous (interrupted) run
    left it disabled.  No-op on platforms without termios or when stdin
    is not a tty."""
    if not termios:
        return
    fd = sys.stdin
    if not fd.isatty():
        return
    attr_list = termios.tcgetattr(fd)
    if attr_list[3] & termios.ECHO:
        # Echo already on; nothing to restore.
        return
    attr_list[3] |= termios.ECHO
    # Ignore SIGTTOU while touching the terminal from the background.
    if hasattr(signal, 'SIGTTOU'):
        old_handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN)
    else:
        old_handler = None
    termios.tcsetattr(fd, termios.TCSANOW, attr_list)
    if old_handler is not None:
        signal.signal(signal.SIGTTOU, old_handler)
def reloader_thread():
    """Watch for source changes; exit(3) to request a reload, or reload
    translations in place when only .mo files changed."""
    ensure_echo_on()
    change_detector = inotify_code_changed if USE_INOTIFY else code_changed
    while RUN_RELOADER:
        change = change_detector()
        if change == FILE_MODIFIED:
            sys.exit(3)  # force reload
        elif change == I18N_MODIFIED:
            reset_translations()
        time.sleep(1)
def restart_with_reloader():
    """Respawn this script in a child process until it exits with code != 3.

    Exit code 3 means "source changed, restart me"; anything else is
    propagated to the caller.
    """
    while True:
        cmd = [sys.executable]
        cmd += ['-W%s' % o for o in sys.warnoptions]
        cmd += sys.argv
        if sys.platform == "win32":
            # spawnve on Windows does not quote arguments itself.
            cmd = ['"%s"' % a for a in cmd]
        child_env = dict(os.environ)
        child_env["RUN_MAIN"] = 'true'
        exit_code = os.spawnve(os.P_WAIT, sys.executable, cmd, child_env)
        if exit_code != 3:
            return exit_code
def python_reloader(main_func, args, kwargs):
    """CPython reloader entry point.

    In the child (RUN_MAIN set), run *main_func* on a background thread and
    watch for file changes; in the parent, keep respawning the child and
    propagate its final exit status.
    """
    if os.environ.get("RUN_MAIN") == "true":
        # Child process: serve on a thread, watch files on this one.
        thread.start_new_thread(main_func, args, kwargs)
        try:
            reloader_thread()
        except KeyboardInterrupt:
            pass
        return
    # Parent process: supervise restarts.
    try:
        exit_code = restart_with_reloader()
        if exit_code < 0:
            # Child died from a signal; mirror it on ourselves.
            os.kill(os.getpid(), -exit_code)
        else:
            sys.exit(exit_code)
    except KeyboardInterrupt:
        pass
def jython_reloader(main_func, args, kwargs):
    """Jython reloader: poll for changes and restart via SystemRestart."""
    from _systemrestart import SystemRestart
    thread.start_new_thread(main_func, args)
    while not code_changed():
        time.sleep(1)
    raise SystemRestart
def main(main_func, args=None, kwargs=None):
    """Run *main_func* under the platform-appropriate autoreloader.

    :param main_func: callable to run (and restart on source changes).
    :param args: positional arguments for *main_func* (default: empty tuple).
    :param kwargs: keyword arguments for *main_func* (default: empty dict).
    """
    args = () if args is None else args
    kwargs = {} if kwargs is None else kwargs
    if sys.platform.startswith('java'):
        reloader = jython_reloader
    else:
        reloader = python_reloader
    # Wrap so that exceptions record the offending file for error reporting.
    reloader(check_errors(main_func), args, kwargs)
| bsd-3-clause |
StackStorm/mistral | mistral/api/controllers/v2/task.py | 1 | 17477 | # Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from oslo_log import log as logging
from pecan import rest
import sqlalchemy as sa
import tenacity
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from mistral.api import access_control as acl
from mistral.api.controllers.v2 import action_execution
from mistral.api.controllers.v2 import resources
from mistral.api.controllers.v2 import types
from mistral import context
from mistral.db.v2 import api as db_api
from mistral import exceptions as exc
from mistral.lang import parser as spec_parser
from mistral.rpc import clients as rpc
from mistral.utils import filter_utils
from mistral.utils import rest_utils
from mistral.workflow import data_flow
from mistral.workflow import states
LOG = logging.getLogger(__name__)
# Task execution states a client may filter/update with; RUNNING_DELAYED
# covers tasks waiting on a delay policy.
STATE_TYPES = wtypes.Enum(str, states.IDLE, states.RUNNING, states.SUCCESS,
                          states.ERROR, states.RUNNING_DELAYED)
def _get_task_resource_with_result(task_ex):
    """Convert a task execution DB model into an API resource.

    The task's result is computed from the data-flow context and stored on
    the resource as a JSON string.
    """
    result = data_flow.get_task_execution_result(task_ex)

    task = resources.Task.from_db_model(task_ex)
    task.result = json.dumps(result)

    return task
class TaskExecutionsController(rest.RestController):
    """REST controller for workflow executions spawned by a given task."""

    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(resources.Executions, types.uuid, types.uuid, int,
                         types.uniquelist, types.list, types.uniquelist,
                         wtypes.text, types.uuid, wtypes.text, types.jsontype,
                         STATE_TYPES, wtypes.text, types.jsontype,
                         types.jsontype, wtypes.text, wtypes.text)
    def get_all(self, task_execution_id, marker=None, limit=None,
                sort_keys='created_at', sort_dirs='asc', fields='',
                workflow_name=None, workflow_id=None, description=None,
                params=None, state=None, state_info=None, input=None,
                output=None, created_at=None, updated_at=None):
        """Return all executions that belong to the given task execution.

        :param task_execution_id: Task execution ID.
        :param marker: Optional. Pagination marker for large data sets.
        :param limit: Optional. Maximum number of resources to return in a
                      single result. Default value is None for backward
                      compatibility.
        :param sort_keys: Optional. Columns to sort results by.
                          Default: created_at, which is backward compatible.
        :param sort_dirs: Optional. Directions to sort corresponding to
                          sort_keys, "asc" or "desc" can be chosen.
                          Default: asc. The length of sort_dirs can be equal
                          or less than that of sort_keys.
        :param fields: Optional. A specified list of fields of the resource to
                       be returned. 'id' will be included automatically in
                       fields if it's provided, since it will be used when
                       constructing 'next' link.
        :param workflow_name: Optional. Keep only resources with a specific
                              workflow name.
        :param workflow_id: Optional. Keep only resources with a specific
                            workflow ID.
        :param description: Optional. Keep only resources with a specific
                            description.
        :param params: Optional. Keep only resources with specific parameters.
        :param state: Optional. Keep only resources with a specific state.
        :param state_info: Optional. Keep only resources with specific
                           state information.
        :param input: Optional. Keep only resources with a specific input.
        :param output: Optional. Keep only resources with a specific output.
        :param created_at: Optional. Keep only resources created at a specific
                           time and date.
        :param updated_at: Optional. Keep only resources with specific latest
                           update time and date.
        """
        acl.enforce('executions:list', context.ctx())
        # task_execution_id is folded into the filter set alongside the
        # optional query parameters.
        filters = filter_utils.create_filters_from_request_params(
            task_execution_id=task_execution_id,
            created_at=created_at,
            workflow_name=workflow_name,
            workflow_id=workflow_id,
            params=params,
            state=state,
            state_info=state_info,
            input=input,
            output=output,
            updated_at=updated_at,
            description=description
        )
        LOG.debug(
            "Fetch executions. marker=%s, limit=%s, sort_keys=%s, "
            "sort_dirs=%s, filters=%s", marker, limit, sort_keys, sort_dirs,
            filters
        )
        # Delegate pagination/field selection to the shared REST helper.
        return rest_utils.get_all(
            resources.Executions,
            resources.Execution,
            db_api.get_workflow_executions,
            db_api.get_workflow_execution,
            marker=marker,
            limit=limit,
            sort_keys=sort_keys,
            sort_dirs=sort_dirs,
            fields=fields,
            **filters
        )
# Retry transient DB failures (e.g. deadlocks or dropped connections)
# instead of surfacing them to the API client.
@tenacity.retry(
    retry=tenacity.retry_if_exception_type(sa.exc.OperationalError),
    stop=tenacity.stop_after_attempt(10),
    # NOTE(review): tenacity wait_* arguments are in seconds, so
    # increment=100 backs off by 100 s per attempt; the original
    # "0.1 seconds" note assumed milliseconds -- verify against the
    # tenacity version in use.
    wait=tenacity.wait_incrementing(increment=100)  # 0.1 seconds
)
def _get_task_execution(id):
    """Fetch a task execution by ID and convert it to an API resource.

    Runs inside a single DB transaction so the task row and the data used
    to compute its result are read consistently.
    """
    with db_api.transaction():
        task_ex = db_api.get_task_execution(id)
        return _get_task_resource_with_result(task_ex)
class TasksController(rest.RestController):
    """REST controller for task executions (/tasks)."""

    # Nested routes: /tasks/<id>/action_executions and
    # /tasks/<id>/workflow_executions.
    action_executions = action_execution.TasksActionExecutionController()
    workflow_executions = TaskExecutionsController()

    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(resources.Task, wtypes.text)
    def get(self, id):
        """Return the specified task.

        :param id: UUID of task to retrieve
        """
        acl.enforce('tasks:get', context.ctx())

        LOG.debug("Fetch task [id=%s]", id)

        return _get_task_execution(id)

    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(resources.Tasks, types.uuid, int, types.uniquelist,
                         types.list, types.uniquelist, wtypes.text,
                         wtypes.text, types.uuid, types.uuid, STATE_TYPES,
                         wtypes.text, wtypes.text, types.jsontype, bool,
                         wtypes.text, wtypes.text, bool, types.jsontype)
    def get_all(self, marker=None, limit=None, sort_keys='created_at',
                sort_dirs='asc', fields='', name=None, workflow_name=None,
                workflow_id=None, workflow_execution_id=None, state=None,
                state_info=None, result=None, published=None, processed=None,
                created_at=None, updated_at=None, reset=None, env=None):
        """Return all tasks.

        Where project_id is the same as the requester or
        project_id is different but the scope is public.

        :param marker: Optional. Pagination marker for large data sets.
        :param limit: Optional. Maximum number of resources to return in a
                      single result. Default value is None for backward
                      compatibility.
        :param sort_keys: Optional. Columns to sort results by.
                          Default: created_at, which is backward compatible.
        :param sort_dirs: Optional. Directions to sort corresponding to
                          sort_keys, "asc" or "desc" can be chosen.
                          Default: asc. The length of sort_dirs can be equal
                          or less than that of sort_keys.
        :param fields: Optional. A specified list of fields of the resource to
                       be returned. 'id' will be included automatically in
                       fields if it's provided, since it will be used when
                       constructing 'next' link.
        :param name: Optional. Keep only resources with a specific name.
        :param workflow_name: Optional. Keep only resources with a specific
                              workflow name.
        :param workflow_id: Optional. Keep only resources with a specific
                            workflow ID.
        :param workflow_execution_id: Optional. Keep only resources with a
                                      specific workflow execution ID.
        :param state: Optional. Keep only resources with a specific state.
        :param state_info: Optional. Keep only resources with specific
                           state information.
        :param result: Optional. Keep only resources with a specific result.
        :param published: Optional. Keep only resources with specific
                          published content.
        :param processed: Optional. Keep only resources which have been
                          processed or not.
        :param reset: Optional. Keep only resources which have been reset or
                      not.
        :param env: Optional. Keep only resources with a specific environment.
        :param created_at: Optional. Keep only resources created at a specific
                           time and date.
        :param updated_at: Optional. Keep only resources with specific latest
                           update time and date.
        """
        acl.enforce('tasks:list', context.ctx())

        filters = filter_utils.create_filters_from_request_params(
            created_at=created_at,
            workflow_name=workflow_name,
            workflow_id=workflow_id,
            state=state,
            state_info=state_info,
            updated_at=updated_at,
            name=name,
            workflow_execution_id=workflow_execution_id,
            result=result,
            published=published,
            processed=processed,
            reset=reset,
            env=env
        )
        LOG.debug(
            "Fetch tasks. marker=%s, limit=%s, sort_keys=%s, sort_dirs=%s,"
            " filters=%s", marker, limit, sort_keys, sort_dirs, filters
        )
        return rest_utils.get_all(
            resources.Tasks,
            resources.Task,
            db_api.get_task_executions,
            db_api.get_task_execution,
            marker=marker,
            limit=limit,
            sort_keys=sort_keys,
            sort_dirs=sort_dirs,
            fields=fields,
            **filters
        )

    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(resources.Task, wtypes.text, body=resources.Task)
    def put(self, id, task):
        """Update the specified task execution.

        Only rerunning a failed task is supported: the request body must set
        state to RUNNING and the stored task must currently be in ERROR.

        :param id: Task execution ID.
        :param task: Task execution object.
        """
        acl.enforce('tasks:update', context.ctx())

        LOG.debug("Update task execution [id=%s, task=%s]", id, task)

        # Validate the request against the stored task inside a transaction.
        with db_api.transaction():
            task_ex = db_api.get_task_execution(id)
            task_spec = spec_parser.get_task_spec(task_ex.spec)
            task_name = task.name or None
            reset = task.reset
            env = task.env or None
            if task_name and task_name != task_ex.name:
                raise exc.WorkflowException('Task name does not match.')
            wf_ex = db_api.get_workflow_execution(
                task_ex.workflow_execution_id
            )
            wf_name = task.workflow_name or None
            if wf_name and wf_name != wf_ex.name:
                raise exc.WorkflowException('Workflow name does not match.')
            if task.state != states.RUNNING:
                raise exc.WorkflowException(
                    'Invalid task state. '
                    'Only updating task to rerun is supported.'
                )
            if task_ex.state != states.ERROR:
                raise exc.WorkflowException(
                    'The current task execution must be in ERROR for rerun.'
                    ' Only updating task to rerun is supported.'
                )
            # Only with-items tasks can keep (not reset) prior iterations.
            if not task_spec.get_with_items() and not reset:
                raise exc.WorkflowException(
                    'Only with-items task has the option to not reset.'
                )

        # Ask the engine (outside the read transaction) to rerun the task.
        rpc.get_engine_client().rerun_workflow(
            task_ex.id,
            reset=reset,
            env=env
        )

        # Re-read the task to return its post-rerun state.
        with db_api.transaction():
            task_ex = db_api.get_task_execution(id)
            return _get_task_resource_with_result(task_ex)
class ExecutionTasksController(rest.RestController):
    """REST controller for the tasks belonging to a workflow execution."""

    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(resources.Tasks, types.uuid, types.uuid, int,
                         types.uniquelist, types.list, types.uniquelist,
                         wtypes.text, wtypes.text, types.uuid, STATE_TYPES,
                         wtypes.text, wtypes.text, types.jsontype, bool,
                         wtypes.text, wtypes.text, bool, types.jsontype)
    def get_all(self, workflow_execution_id, marker=None, limit=None,
                sort_keys='created_at', sort_dirs='asc', fields='', name=None,
                workflow_name=None, workflow_id=None, state=None,
                state_info=None, result=None, published=None, processed=None,
                created_at=None, updated_at=None, reset=None, env=None):
        """Return all tasks within the execution.

        Where project_id is the same as the requester or
        project_id is different but the scope is public.

        :param workflow_execution_id: Required. Keep only tasks belonging to
                                      this workflow execution ID.
        :param marker: Optional. Pagination marker for large data sets.
        :param limit: Optional. Maximum number of resources to return in a
                      single result. Default value is None for backward
                      compatibility.
        :param sort_keys: Optional. Columns to sort results by.
                          Default: created_at, which is backward compatible.
        :param sort_dirs: Optional. Directions to sort corresponding to
                          sort_keys, "asc" or "desc" can be chosen.
                          Default: asc. The length of sort_dirs can be equal
                          or less than that of sort_keys.
        :param fields: Optional. A specified list of fields of the resource to
                       be returned. 'id' will be included automatically in
                       fields if it's provided, since it will be used when
                       constructing 'next' link.
        :param name: Optional. Keep only resources with a specific name.
        :param workflow_name: Optional. Keep only resources with a specific
                              workflow name.
        :param workflow_id: Optional. Keep only resources with a specific
                            workflow ID.
        :param state: Optional. Keep only resources with a specific state.
        :param state_info: Optional. Keep only resources with specific
                           state information.
        :param result: Optional. Keep only resources with a specific result.
        :param published: Optional. Keep only resources with specific
                          published content.
        :param processed: Optional. Keep only resources which have been
                          processed or not.
        :param reset: Optional. Keep only resources which have been reset or
                      not.
        :param env: Optional. Keep only resources with a specific environment.
        :param created_at: Optional. Keep only resources created at a specific
                           time and date.
        :param updated_at: Optional. Keep only resources with specific latest
                           update time and date.
        """
        acl.enforce('tasks:list', context.ctx())

        # The path's workflow_execution_id is folded into the filter set.
        filters = filter_utils.create_filters_from_request_params(
            workflow_execution_id=workflow_execution_id,
            created_at=created_at,
            workflow_name=workflow_name,
            workflow_id=workflow_id,
            state=state,
            state_info=state_info,
            updated_at=updated_at,
            name=name,
            result=result,
            published=published,
            processed=processed,
            reset=reset,
            env=env
        )
        LOG.debug(
            "Fetch tasks. workflow_execution_id=%s, marker=%s, limit=%s, "
            "sort_keys=%s, sort_dirs=%s, filters=%s",
            workflow_execution_id, marker, limit, sort_keys, sort_dirs,
            filters
        )
        return rest_utils.get_all(
            resources.Tasks,
            resources.Task,
            db_api.get_task_executions,
            db_api.get_task_execution,
            marker=marker,
            limit=limit,
            sort_keys=sort_keys,
            sort_dirs=sort_dirs,
            fields=fields,
            **filters
        )
| apache-2.0 |
unicefuganda/mics | survey/migrations/0016_auto__del_field_household_name__add_field_household_head__add_field_ho.py | 2 | 19899 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration (auto-generated by South).

        Replaces Household.name with a nullable FK to HouseholdHead and adds
        per-gender/per-age-bracket occupant count columns, all defaulting
        to 0.
        """
        # Deleting field 'Household.name'
        db.delete_column(u'survey_household', 'name')
        # Adding field 'Household.head'
        db.add_column(u'survey_household', 'head',
                      self.gf('django.db.models.fields.related.ForeignKey')(to=orm['survey.HouseholdHead'], null=True),
                      keep_default=False)
        # Adding field 'Household.number_of_males'
        db.add_column(u'survey_household', 'number_of_males',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
                      keep_default=False)
        # Adding field 'Household.number_of_females'
        db.add_column(u'survey_household', 'number_of_females',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
                      keep_default=False)
        # Adding field 'Household.children_5_12_years'
        db.add_column(u'survey_household', 'children_5_12_years',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
                      keep_default=False)
        # Adding field 'Household.children_13_17_years'
        db.add_column(u'survey_household', 'children_13_17_years',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
                      keep_default=False)
        # Adding field 'Household.children_0_5_months'
        db.add_column(u'survey_household', 'children_0_5_months',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
                      keep_default=False)
        # Adding field 'Household.children_6_11_months'
        db.add_column(u'survey_household', 'children_6_11_months',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
                      keep_default=False)
        # Adding field 'Household.children_12_23_months'
        db.add_column(u'survey_household', 'children_12_23_months',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
                      keep_default=False)
        # Adding field 'Household.children_24_59_months'
        db.add_column(u'survey_household', 'children_24_59_months',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
                      keep_default=False)
        # Adding field 'Household.women_15_19_years'
        db.add_column(u'survey_household', 'women_15_19_years',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
                      keep_default=False)
        # Adding field 'Household.children_20_49_years'
        db.add_column(u'survey_household', 'children_20_49_years',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
                      keep_default=False)
    def backwards(self, orm):
        """Reverse the migration (auto-generated by South).

        Restores Household.name (with placeholder default 'dummy', since the
        original values are unrecoverable) and drops the head FK plus all
        occupant count columns added by forwards().
        """
        # Adding field 'Household.name'
        db.add_column(u'survey_household', 'name',
                      self.gf('django.db.models.fields.CharField')(default='dummy', max_length=100),
                      keep_default=False)
        # Deleting field 'Household.head'
        db.delete_column(u'survey_household', 'head_id')
        # Deleting field 'Household.number_of_males'
        db.delete_column(u'survey_household', 'number_of_males')
        # Deleting field 'Household.number_of_females'
        db.delete_column(u'survey_household', 'number_of_females')
        # Deleting field 'Household.children_5_12_years'
        db.delete_column(u'survey_household', 'children_5_12_years')
        # Deleting field 'Household.children_13_17_years'
        db.delete_column(u'survey_household', 'children_13_17_years')
        # Deleting field 'Household.children_0_5_months'
        db.delete_column(u'survey_household', 'children_0_5_months')
        # Deleting field 'Household.children_6_11_months'
        db.delete_column(u'survey_household', 'children_6_11_months')
        # Deleting field 'Household.children_12_23_months'
        db.delete_column(u'survey_household', 'children_12_23_months')
        # Deleting field 'Household.children_24_59_months'
        db.delete_column(u'survey_household', 'children_24_59_months')
        # Deleting field 'Household.women_15_19_years'
        db.delete_column(u'survey_household', 'women_15_19_years')
        # Deleting field 'Household.children_20_49_years'
        db.delete_column(u'survey_household', 'children_20_49_years')
models = {
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'locations.location': {
'Meta': {'object_name': 'Location'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'point': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Point']", 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['locations.Location']"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations'", 'null': 'True', 'to': u"orm['locations.LocationType']"})
},
u'locations.locationtype': {
'Meta': {'object_name': 'LocationType'},
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'primary_key': 'True'})
},
u'locations.point': {
'Meta': {'object_name': 'Point'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'})
},
'survey.answerrule': {
'Meta': {'object_name': 'AnswerRule'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'condition': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'next_question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parent_question_rules'", 'null': 'True', 'to': "orm['survey.Question']"}),
'question': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'rule'", 'unique': 'True', 'null': 'True', 'to': "orm['survey.Question']"}),
'validate_with_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.QuestionOption']", 'null': 'True'}),
'validate_with_question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'validate_with_value': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'})
},
'survey.batch': {
'Meta': {'object_name': 'Batch'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batches'", 'null': 'True', 'to': "orm['survey.Survey']"})
},
'survey.household': {
'Meta': {'object_name': 'Household'},
'children_0_5_months': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'children_12_23_months': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'children_13_17_years': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'children_20_49_years': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'children_24_59_months': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'children_5_12_years': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'children_6_11_months': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'head': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.HouseholdHead']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'households'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'number_of_females': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'number_of_males': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'women_15_19_years': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'survey.householdhead': {
'Meta': {'object_name': 'HouseholdHead'},
'age': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'default': "'Primary'", 'max_length': '100', 'null': 'True'}),
'male': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Household'", 'max_length': '100'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'resident_since': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'survey.indicator': {
'Meta': {'object_name': 'Indicator'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'indicators'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'})
},
'survey.investigator': {
'Meta': {'object_name': 'Investigator'},
'age': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'English'", 'max_length': '100', 'null': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'default': "'Primary'", 'max_length': '100', 'null': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Location']", 'null': 'True'}),
'male': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'survey.locationautocomplete': {
'Meta': {'object_name': 'LocationAutoComplete'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Location']", 'null': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'survey.multichoiceanswer': {
'Meta': {'object_name': 'MultiChoiceAnswer'},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.QuestionOption']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Household']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Investigator']", 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'})
},
'survey.numericalanswer': {
'Meta': {'object_name': 'NumericalAnswer'},
'answer': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '5', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Household']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Investigator']", 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'})
},
'survey.question': {
'Meta': {'object_name': 'Question'},
'answer_type': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'null': 'True', 'to': "orm['survey.Indicator']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'null': 'True', 'to': "orm['survey.Question']"}),
'subquestion': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '60'})
},
'survey.questionoption': {
'Meta': {'object_name': 'QuestionOption'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'null': 'True', 'to': "orm['survey.Question']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'survey.survey': {
'Meta': {'object_name': 'Survey'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'survey.textanswer': {
'Meta': {'object_name': 'TextAnswer'},
'answer': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Household']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Investigator']", 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'})
}
}
complete_apps = ['survey'] | bsd-3-clause |
Fornost461/drafts-and-stuff | Python/organizer/types/task.py | 1 | 1204 | #!/usr/bin/env python3
from node import Node
import datetime as d
class Task:
    # todo class Task(Node):

    # Indentation unit used by to_string().
    _TAB = "    "

    def __init__(self, description="", duration=None, start=None, subtasks=None):
        """A schedulable task with an optional list of subtasks.

        description -- human-readable summary of the task
        duration    -- datetime.timedelta; defaults to a zero duration
        start       -- datetime.datetime; defaults to "now" at call time
        subtasks    -- list of Task instances; defaults to a new empty list
        """
        # None sentinels replace the old defaults, which were evaluated once
        # at function-definition time: every Task shared one subtasks list
        # (mutable-default pitfall) and one frozen "now" timestamp.
        self.description = description
        self.duration = d.timedelta() if duration is None else duration
        self.start = d.datetime.now() if start is None else start
        self.subtasks = [] if subtasks is None else subtasks

    def actualizeDuration(self):
        """Recompute self.duration as the sum of the subtasks' durations.

        Leaves self.duration untouched when there are no subtasks.
        """
        # The old body referenced the undefined names `subtasks` and
        # `getDuration` and raised NameError whenever subtasks existed.
        if self.subtasks:
            # sum() needs an explicit timedelta start value; the default
            # integer 0 cannot be added to a timedelta.
            self.duration = sum((s.duration for s in self.subtasks),
                                d.timedelta())

    def to_string(self, min_indent_level=0):
        """Render the task and, recursively, its subtasks as indented text."""
        # first line: the start timestamp
        current_indent_level = min_indent_level
        res = current_indent_level * self._TAB + self.start.isoformat() + "\n"  # todo: use strftime(format) instead
        # second line: description plus the (recomputed) total duration
        current_indent_level += 1
        res += current_indent_level * self._TAB + self.description
        self.actualizeDuration()
        if self.duration is not None:
            res += " ({})".format(self.duration)  # todo: use something like strftime(format) instead
        res += "\n"
        # following lines: one block per subtask, indented one level deeper
        current_indent_level += 1
        for sub in self.subtasks:
            res += sub.to_string(current_indent_level)
        return res
#~ if __name__ == "__main__":
#~ pass
| cc0-1.0 |
coolsvap/sos | sos/plugins/apparmor.py | 12 | 1415 | # Copyright (c) 2012 Adam Stokes <adam.stokes@canonical.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, UbuntuPlugin
class Apparmor(Plugin, UbuntuPlugin):
    """Apparmor mandatory access control
    """

    plugin_name = 'apparmor'
    profiles = ('security',)

    def setup(self):
        # Grab the whole AppArmor configuration tree...
        self.add_copy_spec([
            "/etc/apparmor*"
        ])
        # ...but exclude the compiled cache and the bulky stock profile
        # directories, which carry no host-specific information.
        for excluded in (
            "/etc/apparmor.d/cache",
            "/etc/apparmor.d/libvirt/libvirt*",
            "/etc/apparmor.d/abstractions",
        ):
            self.add_forbidden_path(excluded)
        # Record runtime status plus directory listings of the excluded trees.
        self.add_cmd_output([
            "apparmor_status",
            "ls -alh /etc/apparmor.d/abstractions",
            "ls -alh /etc/apparmor.d/libvirt",
        ])
# vim: set et ts=4 sw=4 :
| gpl-2.0 |
jacobSingh/allbackgammongroup | abg_stats/app.py | 1 | 2669 | import logging
l = logging.getLogger("abg")
l.error("Before imports in app.py")
# -*- coding: utf-8 -*-
"""The app module, containing the app factory function."""
from flask import Flask, render_template
l.error("After Flask import")
from abg_stats import commands, public, user
l.error("blueprints")
from abg_stats import abg
l.error("ABG")
from abg_stats.assets import assets
from abg_stats.extensions import bcrypt, cache, csrf_protect, db, debug_toolbar, login_manager, migrate
from abg_stats.settings import ProdConfig
l.error("After imports in app.py")
def create_app(config_object=ProdConfig):
    """An application factory, as explained here: http://flask.pocoo.org/docs/patterns/appfactories/.

    :param config_object: The configuration object to use.
    :returns: A fully configured Flask application instance.
    """
    # Use the package name ("abg_stats"), not the dotted module path,
    # as the Flask application name.
    app = Flask(__name__.split('.')[0])
    app.config.from_object(config_object)
    register_extensions(app)
    register_blueprints(app)
    register_errorhandlers(app)
    register_shellcontext(app)
    register_commands(app)
    return app
def register_extensions(app):
    """Bind every Flask extension instance to the given application."""
    # All of these expose the standard init_app() protocol, so they can be
    # initialized uniformly (in the same order as before).
    for extension in (assets, bcrypt, cache, db, csrf_protect,
                      login_manager, debug_toolbar):
        extension.init_app(app)
    # Flask-Migrate additionally needs the SQLAlchemy instance.
    migrate.init_app(app, db)
    return None
def register_blueprints(app):
    """Attach all of the project's blueprint views to the application."""
    mounts = (
        (public.views.blueprint, None),
        (user.views.blueprint, None),
        (abg.views.other.blueprint, "/abg"),
        (abg.views.player.blueprint, "/abg/player"),
        (abg.views.tournaments.blueprint, "/abg/tournaments"),
    )
    for blueprint, prefix in mounts:
        # Only pass url_prefix when one is wanted: passing url_prefix=None
        # explicitly could override a prefix configured on the blueprint.
        if prefix is None:
            app.register_blueprint(blueprint)
        else:
            app.register_blueprint(blueprint, url_prefix=prefix)
    return None
def register_errorhandlers(app):
    """Install template-rendering handlers for common HTTP error codes."""
    def render_error(error):
        """Render the template named after the error's HTTP status code."""
        # HTTPException instances carry `code`; anything else maps to 500.
        status = getattr(error, 'code', 500)
        return render_template('{0}.html'.format(status)), status

    for status_code in (401, 404, 500):
        app.errorhandler(status_code)(render_error)
    return None
def register_shellcontext(app):
    """Register shell context objects."""
    def shell_context():
        """Shell context objects."""
        # Names made available automatically inside `flask shell`.
        return {
            'db': db,
            'User': user.models.User}

    app.shell_context_processor(shell_context)
def register_commands(app):
    """Attach the project's Click commands to the Flask CLI."""
    for command in (commands.test, commands.lint,
                    commands.clean, commands.urls):
        app.cli.add_command(command)
| bsd-3-clause |
tedder/ansible | lib/ansible/module_utils/docker/swarm.py | 5 | 8325 | # (c) 2019 Piotr Wojciechowski (@wojciechowskipiotr) <piotr@it-playground.pl>
# (c) Thierry Bouvet (@tbouvet)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
import json
try:
from docker.errors import APIError
except ImportError:
# missing docker-py handled in ansible.module_utils.docker.common
pass
from ansible.module_utils._text import to_native
from ansible.module_utils.docker.common import AnsibleDockerClient
class AnsibleDockerSwarmClient(AnsibleDockerClient):
    """Docker API client extended with helpers for querying Docker Swarm state.

    All failure paths go through self.fail(), which (per AnsibleDockerClient
    convention) aborts the module run rather than returning.
    """

    def __init__(self, **kwargs):
        super(AnsibleDockerSwarmClient, self).__init__(**kwargs)

    def get_swarm_node_id(self):
        """
        Get the 'NodeID' of the Swarm node or 'None' if host is not in Swarm. It returns the NodeID
        of Docker host the module is executed on
        :return:
            NodeID of host or 'None' if not part of Swarm
        """

        try:
            info = self.info()
        except APIError as exc:
            self.fail("Failed to get node information for %s" % to_native(exc))

        if info:
            # Round-trip through JSON to normalize docker-py's response into
            # plain dicts/strings before digging into it.
            json_str = json.dumps(info, ensure_ascii=False)
            swarm_info = json.loads(json_str)
            if swarm_info['Swarm']['NodeID']:
                return swarm_info['Swarm']['NodeID']
        return None

    def check_if_swarm_node(self, node_id=None):
        """
        Checking if host is part of Docker Swarm. If 'node_id' is not provided it reads the Docker host
        system information looking if specific key in output exists. If 'node_id' is provided then it tries to
        read node information assuming it is run on Swarm manager. The get_node_inspect() method handles exception if
        it is not executed on Swarm manager
        :param node_id: Node identifier
        :return:
            bool: True if node is part of Swarm, False otherwise
        """
        if node_id is None:
            try:
                info = self.info()
            except APIError:
                self.fail("Failed to get host information.")

            if info:
                json_str = json.dumps(info, ensure_ascii=False)
                swarm_info = json.loads(json_str)
                if swarm_info['Swarm']['NodeID']:
                    return True
            return False
        else:
            try:
                node_info = self.get_node_inspect(node_id=node_id)
            except APIError:
                # NOTE(review): bare `return` yields None here, not False;
                # callers treating the result as a bool still see it as falsy,
                # but the documented return type is not honored on this path.
                return

            if node_info['ID'] is not None:
                return True
            return False

    def check_if_swarm_manager(self):
        """
        Checks if node role is set as Manager in Swarm. The node is the docker host on which module action
        is performed. The inspect_swarm() will fail if node is not a manager
        :return: True if node is Swarm Manager, False otherwise
        """

        try:
            # inspect_swarm() only succeeds on a manager node; the APIError
            # on a worker/non-member is the negative signal.
            self.inspect_swarm()
            return True
        except APIError:
            return False

    def fail_task_if_not_swarm_manager(self):
        """
        If host is not a swarm manager then Ansible task on this host should end with 'failed' state
        """
        if not self.check_if_swarm_manager():
            self.fail("Error running docker swarm module: must run on swarm manager node")

    def check_if_swarm_worker(self):
        """
        Checks if node role is set as Worker in Swarm. The node is the docker host on which module action
        is performed. Will fail if run on host that is not part of Swarm via check_if_swarm_node()
        :return: True if node is Swarm Worker, False otherwise
        """
        # A worker is defined as "in the swarm, but not a manager".
        if self.check_if_swarm_node() and not self.check_if_swarm_manager():
            return True
        return False

    def check_if_swarm_node_is_down(self, node_id=None):
        """
        Checks if node status on Swarm manager is 'down'. If node_id is provided it query manager about
        node specified in parameter, otherwise it query manager itself. If run on Swarm Worker node or
        host that is not part of Swarm it will fail the playbook
        :param node_id: node ID or name, if None then method will try to get node_id of host module run on
        :return:
            True if node is part of swarm but its state is down, False otherwise
        """
        if node_id is None:
            node_id = self.get_swarm_node_id()

        node_info = self.get_node_inspect(node_id=node_id)
        if node_info['Status']['State'] == 'down':
            return True
        return False

    def get_node_inspect(self, node_id=None, skip_missing=False):
        """
        Returns Swarm node info as in 'docker node inspect' command about single node
        :param skip_missing: if True then function will return None instead of failing the task
        :param node_id: node ID or name, if None then method will try to get node_id of host module run on
        :return:
            Single node information structure
        """
        if node_id is None:
            node_id = self.get_swarm_node_id()

        if node_id is None:
            self.fail("Failed to get node information.")

        try:
            node_info = self.inspect_node(node_id=node_id)
        except APIError as exc:
            # 503: the API endpoint is not a manager; 404: unknown node.
            if exc.status_code == 503:
                self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
            if exc.status_code == 404:
                if skip_missing is False:
                    self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
                else:
                    return None
        except Exception as exc:
            self.fail("Error inspecting swarm node: %s" % exc)

        # Normalize docker-py's response into plain dicts/strings.
        json_str = json.dumps(node_info, ensure_ascii=False)
        node_info = json.loads(json_str)
        return node_info

    def get_all_nodes_inspect(self):
        """
        Returns Swarm node info as in 'docker node inspect' command about all registered nodes
        :return:
            Structure with information about all nodes
        """
        try:
            node_info = self.nodes()
        except APIError as exc:
            if exc.status_code == 503:
                self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
            self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
        except Exception as exc:
            self.fail("Error inspecting swarm node: %s" % exc)

        # Normalize docker-py's response into plain dicts/strings.
        json_str = json.dumps(node_info, ensure_ascii=False)
        node_info = json.loads(json_str)
        return node_info

    def get_all_nodes_list(self, output='short'):
        """
        Returns list of nodes registered in Swarm
        :param output: Defines format of returned data
        :return:
            If 'output' is 'short' then return data is list of nodes hostnames registered in Swarm,
            if 'output' is 'long' then returns data is list of dict containing the attributes as in
            output of command 'docker node ls'
        """
        nodes_list = []

        nodes_inspect = self.get_all_nodes_inspect()
        if nodes_inspect is None:
            return None

        if output == 'short':
            for node in nodes_inspect:
                nodes_list.append(node['Description']['Hostname'])
        elif output == 'long':
            for node in nodes_inspect:
                node_property = {}

                node_property.update({'ID': node['ID']})
                node_property.update({'Hostname': node['Description']['Hostname']})
                node_property.update({'Status': node['Status']['State']})
                node_property.update({'Availability': node['Spec']['Availability']})
                # Only managers carry a ManagerStatus section; the leader
                # flag is only present (and True) on the current leader.
                if 'ManagerStatus' in node:
                    if node['ManagerStatus']['Leader'] is True:
                        node_property.update({'Leader': True})
                    node_property.update({'ManagerStatus': node['ManagerStatus']['Reachability']})
                node_property.update({'EngineVersion': node['Description']['Engine']['EngineVersion']})

                nodes_list.append(node_property)
        else:
            # Unknown output format requested.
            return None

        return nodes_list

    def get_node_name_by_id(self, nodeid):
        # Convenience lookup: hostname of the node with the given ID.
        return self.get_node_inspect(nodeid)['Description']['Hostname']
| gpl-3.0 |
wiltonlazary/arangodb | 3rdParty/V8/V8-5.0.71.39/tools/swarming_client/third_party/rsa/rsa/util.py | 79 | 2938 | # -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Utility functions.'''
from __future__ import with_statement, print_function
import sys
from optparse import OptionParser
import rsa.key
def private_to_public():
    '''Reads a private key and outputs the corresponding public key.

    Command-line entry point. Reads PKCS#1 input from a file or stdin,
    derives the public key (n, e) from the private key, and writes it to
    a file or stdout. All status messages go to stderr so the key data on
    stdout stays clean.
    '''

    # Parse the CLI options
    parser = OptionParser(usage='usage: %prog [options]',
        description='Reads a private key and outputs the '
        'corresponding public key. Both private and public keys use '
        'the format described in PKCS#1 v1.5')

    parser.add_option('-i', '--input', dest='infilename', type='string',
        help='Input filename. Reads from stdin if not specified')
    parser.add_option('-o', '--output', dest='outfilename', type='string',
        help='Output filename. Writes to stdout of not specified')

    parser.add_option('--inform', dest='inform',
        help='key format of input - default PEM',
        choices=('PEM', 'DER'), default='PEM')

    parser.add_option('--outform', dest='outform',
        help='key format of output - default PEM',
        choices=('PEM', 'DER'), default='PEM')

    (cli, cli_args) = parser.parse_args(sys.argv)

    # Read the input data
    if cli.infilename:
        print('Reading private key from %s in %s format' % \
            (cli.infilename, cli.inform), file=sys.stderr)
        with open(cli.infilename, 'rb') as infile:
            in_data = infile.read()
    else:
        print('Reading private key from stdin in %s format' % cli.inform,
              file=sys.stderr)
        # stdin yields text; the key loader expects bytes.
        in_data = sys.stdin.read().encode('ascii')

    assert type(in_data) == bytes, type(in_data)

    # Take the public fields and create a public key
    priv_key = rsa.key.PrivateKey.load_pkcs1(in_data, cli.inform)
    pub_key = rsa.key.PublicKey(priv_key.n, priv_key.e)

    # Save to the output file
    out_data = pub_key.save_pkcs1(cli.outform)

    if cli.outfilename:
        print('Writing public key to %s in %s format' % \
            (cli.outfilename, cli.outform), file=sys.stderr)
        with open(cli.outfilename, 'wb') as outfile:
            outfile.write(out_data)
    else:
        print('Writing public key to stdout in %s format' % cli.outform,
              file=sys.stderr)
        sys.stdout.write(out_data.decode('ascii'))
| apache-2.0 |
savoirfairelinux/OpenUpgrade | addons/account_voucher/report/__init__.py | 378 | 1083 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_voucher_sales_receipt
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jacobwindsor/django-reviewable | Reviewable/models.py | 2 | 4270 | from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.db import models
from stream_django.activity import Activity
class ReviewWithActivity(Activity):
    """ Enable GetStream.io support.

    See https://github.com/GetStream/stream-django for more information
    """
    @property
    def activity_object_attr(self):
        # stream-django hook: the activity's object is the review itself.
        return self
class ReviewNoActivity(object):
    # Inert stand-in base class used when GetStream support is disabled.
    pass
if getattr(settings, 'REVIEW_STREAM_ENABLED', False):
    # Enable get_stream if enabled in settings
    BaseReview = ReviewWithActivity
else:
    # Otherwise mix in a no-op base so Review's MRO stays the same shape.
    BaseReview = ReviewNoActivity
class Review(models.Model, BaseReview):
    """A star-rated, titled review attached to any model via contenttypes."""

    user = models.ForeignKey(
        getattr(settings, 'AUTH_USER_MODEL', 'auth.User'),
        on_delete=models.CASCADE)  # The user who created the review
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    review = models.TextField(help_text="What do you think? Try to keep it concise but not too short. "
                                        "We recommend a minimum of 500 characters.")

    # Default 1-5 star scale; projects may override it with the
    # REVIEW_RATING_CHOICES setting.
    RATINGS = (
        (1, '1 Star'),
        (2, '2 Star'),
        (3, '3 Star'),
        (4, '4 Star'),
        (5, '5 Star')
    )
    rating = models.IntegerField(choices=getattr(settings, 'REVIEW_RATING_CHOICES', RATINGS))
    title = models.CharField(max_length=100)

    # Generic contenttype
    # See https://docs.djangoproject.com/en/1.9/ref/contrib/contenttypes/
    review_object_id = models.PositiveIntegerField()  # The id of the related object
    review_object_content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)  # The content type of the related object
    review_object = GenericForeignKey('review_object_content_type', 'review_object_id')  # The related object

    @property
    def activity_object_attr(self):
        # stream-django hook, defined here as well so it exists even when
        # the activity base class (BaseReview) is the no-op variant.
        return self

    def get_absolute_url(self):
        return reverse('Reviewable:review-detail', kwargs={'pk': self.pk})
class Reviewable(object):
    """Mixin that makes a Django model reviewable.

    Provides lookup helpers for a model's reviews, a convenience method for
    creating one, aggregate properties (count, average rating), and a signal
    receiver for cleaning up reviews when the reviewed object is deleted.
    """

    @property
    def content_type(self):
        """The ContentType record for this model."""
        return ContentType.objects.get_for_model(self)

    @property
    def reviews(self):
        """Queryset of all reviews pointing at this object.

        Reverse generic relation not working so had to do manually.
        https://docs.djangoproject.com/en/1.9/ref/contrib/contenttypes/#reverse-generic-relations
        """
        # TODO: Fix this so uses reverse generic relation
        return Review.objects.filter(
            review_object_content_type=self.content_type,
            review_object_id=self.id,
        )

    def review(self, user, review, title, rating):
        """Create and persist a review of this object.

        :param user: the user instance doing the reviewing
        :param review: the review text
        :param title: the review's title
        :param rating: the star rating given, 1-5
        :returns: the saved Review instance
        """
        created = Review(user=user, title=title, review=review,
                         rating=rating, review_object=self)
        created.save()
        return created

    def delete_reviews(sender, instance, **kwargs):
        """Signal receiver that deletes the object's reviews.

        Intended for use with post_delete, e.g.:
        post_delete.connect(Object.delete_reviews, sender=Object)
        """
        instance.reviews.delete()

    @property
    def review_count(self):
        """Total number of reviews of this object."""
        return self.reviews.count()

    @property
    def average_rating(self):
        """Mean star rating across all reviews (0 when there are none)."""
        all_reviews = self.reviews
        if not all_reviews:
            return 0
        total = sum(single.rating for single in all_reviews)
        if total > 0:
            return total / self.review_count
        return 0
| mit |
userzimmermann/robotframework-python3 | src/robot/running/keywords.py | 1 | 11354 | # Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six import text_type as unicode
from robot.utils import (format_assign_message, get_elapsed_time,
get_error_message, get_timestamp, plural_or_not)
from robot.errors import (ContinueForLoop, DataError, ExecutionFailed,
ExecutionFailures, ExecutionPassed, ExitForLoop,
HandlerExecutionFailed)
from robot.variables import is_scalar_var, VariableAssigner
class Keywords(object):
    """Executable container for the keyword steps of a test or user keyword.

    `templated` relaxes continue-on-failure semantics for template-driven
    tests (passed through to ExecutionFailed.can_continue()).
    """

    def __init__(self, steps, templated=False):
        self._keywords = []
        self._templated = templated
        for s in steps:
            self._add_step(s)

    def _add_step(self, step):
        # Comment rows produce no executable keyword at all.
        if step.is_comment():
            return
        if step.is_for_loop():
            keyword = ForLoop(step, self._templated)
        else:
            keyword = Keyword(step.keyword, step.args, step.assign)
        self.add_keyword(keyword)

    def add_keyword(self, keyword):
        self.[keywords]_keywords.append(keyword) if False else self._keywords.append(keyword)

    def run(self, context):
        """Run all keywords, accumulating continuable failures.

        Raises ExecutionFailures at the end if anything failed, or re-raises
        a PASS-type control-flow exception immediately.
        """
        errors = []
        for kw in self._keywords:
            try:
                kw.run(context)
            except ExecutionPassed as exception:
                # PASS-type control flow (e.g. 'Pass Execution') must still
                # carry along any failures recorded before it.
                exception.set_earlier_failures(errors)
                raise exception
            except ExecutionFailed as exception:
                errors.extend(exception.get_errors())
                if not exception.can_continue(context.in_teardown,
                                              self._templated,
                                              context.dry_run):
                    break
        if errors:
            raise ExecutionFailures(errors)

    def __bool__(self):
        return bool(self._keywords)

    #PY2
    def __nonzero__(self):
        return self.__bool__()

    def __iter__(self):
        return iter(self._keywords)
class _BaseKeyword:
    """Common result/state attributes shared by keyword-like runners.

    Tracks name, arguments, documentation, timeout, keyword type and the
    PASS/FAIL/NOT_RUN status that the output layer serializes.
    """

    def __init__(self, name='', args=None, doc='', timeout='', type='kw'):
        self.name = name
        self.args = args or []
        self.doc = doc
        self.timeout = timeout
        self.type = type
        self.message = ''
        self.status = 'NOT_RUN'

    @property
    def passed(self):
        # True only after a successful run; NOT_RUN and FAIL are both falsy.
        return self.status == 'PASS'

    def serialize(self, serializer):
        serializer.start_keyword(self)
        serializer.end_keyword(self)

    def _get_status(self, error):
        # A PASS-type control-flow exception with no earlier failures still
        # counts as PASS; any other error means FAIL.
        if not error:
            return 'PASS'
        if isinstance(error, ExecutionPassed) and not error.earlier_failures:
            return 'PASS'
        return 'FAIL'
class Keyword(_BaseKeyword):
    """A single executable keyword call, with optional return-value assignment."""

    def __init__(self, name, args, assign=None, type='kw'):
        _BaseKeyword.__init__(self, name, args, type=type)
        self.assign = assign or []
        # The name used to resolve the handler; self.name is later replaced
        # with the handler's long name (plus assignment) for display.
        self.handler_name = name

    def run(self, context):
        """Resolve the handler, execute it, record status/timing, and return its value."""
        handler = self._start(context)
        try:
            return_value = self._run(handler, context)
        except ExecutionFailed as err:
            self.status = self._get_status(err)
            self._end(context, error=err)
            raise
        else:
            # In dry-run mode library keywords are not really executed, so
            # their status is left as NOT_RUN.
            if not (context.dry_run and handler.type == 'library'):
                self.status = 'PASS'
            self._end(context, return_value)
            return return_value

    def _start(self, context):
        # Resolve and initialize the handler, then notify the context so
        # listeners/outputs see the keyword start before it runs.
        handler = context.get_handler(self.handler_name)
        handler.init_keyword(context.variables)
        self.name = self._get_name(handler.longname)
        self.doc = handler.shortdoc
        self.timeout = getattr(handler, 'timeout', '')
        self.starttime = get_timestamp()
        context.start_keyword(self)
        if self.doc.startswith('*DEPRECATED*'):
            msg = self.doc.replace('*DEPRECATED*', '', 1).strip()
            name = self.name.split('} = ', 1)[-1]  # Remove possible variable
            context.warn("Keyword '%s' is deprecated. %s" % (name, msg))
        return handler

    def _get_name(self, handler_longname):
        # Display name: '${var} = Library.Keyword' when assigning.
        if not self.assign:
            return handler_longname
        return '%s = %s' % (', '.join(a.rstrip('= ') for a in self.assign),
                            handler_longname)

    def _run(self, handler, context):
        try:
            # Pass a copy of the args so the handler cannot mutate ours.
            return handler.run(context, self.args[:])
        except ExecutionFailed:
            raise
        except:
            # Deliberately broad: any other exception from the handler is
            # converted into a HandlerExecutionFailed below.
            self._report_failure(context)

    def _end(self, context, return_value=None, error=None):
        self.endtime = get_timestamp()
        self.elapsedtime = get_elapsed_time(self.starttime, self.endtime)
        if error and self.type == 'teardown':
            self.message = unicode(error)
        try:
            if not error or error.can_continue(context.in_teardown):
                self._set_variables(context, return_value, error)
        finally:
            # Always close the keyword in outputs, even if assignment failed.
            context.end_keyword(self)

    def _set_variables(self, context, return_value, error):
        if error:
            # A continuable failure may still carry a return value to assign.
            return_value = error.return_value
        try:
            VariableAssigner(self.assign).assign(context, return_value)
        except DataError as err:
            self.status = 'FAIL'
            msg = unicode(err)
            context.output.fail(msg)
            raise ExecutionFailed(msg, syntax=True)

    def _report_failure(self, context):
        # Wraps the active exception (taken from sys.exc_info inside
        # HandlerExecutionFailed) and logs message + traceback.
        failure = HandlerExecutionFailed()
        context.output.fail(failure.full_message)
        if failure.traceback:
            context.output.debug(failure.traceback)
        raise failure
class ForLoop(_BaseKeyword):
    """Executable FOR loop: validates its data, then runs its body once per
    round, honoring Exit/Continue For Loop and pass/fail control flow."""

    def __init__(self, forstep, templated=False):
        _BaseKeyword.__init__(self, self._get_name(forstep), type='for')
        self.vars = forstep.vars
        self.items = forstep.items
        self.range = forstep.range
        self.keywords = Keywords(forstep.steps, templated)
        self._templated = templated

    def _get_name(self, data):
        # Display name, e.g. "${x} | ${y} IN [ a | b | c | d ]".
        return '%s %s [ %s ]' % (' | '.join(data.vars),
                                 'IN' if not data.range else 'IN RANGE',
                                 ' | '.join(data.items))

    def run(self, context):
        """Run the whole loop, recording timing/status, re-raising any error."""
        self.starttime = get_timestamp()
        context.start_keyword(self)
        error = self._run_with_error_handling(self._validate_and_run, context)
        self.status = self._get_status(error)
        self.endtime = get_timestamp()
        self.elapsedtime = get_elapsed_time(self.starttime, self.endtime)
        context.end_keyword(self)
        if error:
            raise error

    def _run_with_error_handling(self, runnable, context):
        # Converts exceptions into a returned error object (or None) so the
        # caller can finish bookkeeping before re-raising.
        try:
            runnable(context)
        except ExecutionFailed as err:
            return err
        except DataError as err:
            msg = unicode(err)
            context.output.fail(msg)
            return ExecutionFailed(msg, syntax=True)
        else:
            return None

    def _validate_and_run(self, context):
        self._validate()
        self._run(context)

    def _validate(self):
        # Structural checks on the loop header and body.
        if not self.vars:
            raise DataError('FOR loop has no loop variables.')
        for var in self.vars:
            if not is_scalar_var(var):
                raise DataError("Invalid FOR loop variable '%s'." % var)
        if not self.items:
            raise DataError('FOR loop has no loop values.')
        if not self.keywords:
            raise DataError('FOR loop contains no keywords.')

    def _run(self, context):
        errors = []
        items, iteration_steps = self._get_items_and_iteration_steps(context)
        for i in iteration_steps:
            # Each round consumes len(self.vars) consecutive values.
            values = items[i:i+len(self.vars)]
            exception = self._run_one_round(context, self.vars, values)
            if exception:
                if isinstance(exception, ExitForLoop):
                    if exception.earlier_failures:
                        errors.extend(exception.earlier_failures.get_errors())
                    break
                if isinstance(exception, ContinueForLoop):
                    if exception.earlier_failures:
                        errors.extend(exception.earlier_failures.get_errors())
                    continue
                if isinstance(exception, ExecutionPassed):
                    exception.set_earlier_failures(errors)
                    raise exception
                errors.extend(exception.get_errors())
                if not exception.can_continue(context.in_teardown,
                                              self._templated,
                                              context.dry_run):
                    break
        if errors:
            raise ExecutionFailures(errors)

    def _get_items_and_iteration_steps(self, context):
        # In dry-run mode the body is executed exactly once, with the
        # variable names themselves as the values.
        if context.dry_run:
            return self.vars, [0]
        items = self._replace_vars_from_items(context.variables)
        return items, list(range(0, len(items), len(self.vars)))

    def _run_one_round(self, context, variables, values):
        foritem = _ForItem(variables, values)
        context.start_keyword(foritem)
        for var, value in zip(variables, values):
            context.variables[var] = value
        error = self._run_with_error_handling(self.keywords.run, context)
        foritem.end(self._get_status(error))
        context.end_keyword(foritem)
        return error

    def _replace_vars_from_items(self, variables):
        items = variables.replace_list(self.items)
        if self.range:
            items = self._get_range_items(items)
        if len(items) % len(self.vars) == 0:
            return items
        raise DataError('Number of FOR loop values should be multiple of '
                        'variables. Got %d variables but %d value%s.'
                        % (len(self.vars), len(items), plural_or_not(items)))

    def _get_range_items(self, items):
        try:
            items = [self._to_int_with_arithmetics(item) for item in items]
        except:
            # Deliberately broad: any conversion error becomes a DataError
            # with the original message attached.
            raise DataError('Converting argument of FOR IN RANGE failed: %s'
                            % get_error_message())
        if not 1 <= len(items) <= 3:
            raise DataError('FOR IN RANGE expected 1-3 arguments, '
                            'got %d instead.' % len(items))
        return list(range(*items))

    def _to_int_with_arithmetics(self, item):
        # NOTE(review): eval() on loop arguments allows arbitrary expressions
        # ('1+2'); test data is trusted input here, but keep this in mind.
        item = str(item)
        try:
            return int(item)
        except ValueError:
            return int(eval(item))
class _ForItem(_BaseKeyword):
    """Result wrapper for a single FOR loop round (one variable assignment set)."""

    def __init__(self, vars, items):
        # Display name, e.g. "${x} = 1, ${y} = 2".
        name = ', '.join(format_assign_message(var, item)
                         for var, item in zip(vars, items))
        _BaseKeyword.__init__(self, name, type='foritem')
        self.starttime = get_timestamp()

    def end(self, status):
        """Record the round's final status and timing."""
        self.status = status
        self.endtime = get_timestamp()
        self.elapsedtime = get_elapsed_time(self.starttime, self.endtime)
| apache-2.0 |
paolodedios/tensorflow | tensorflow/python/compiler/tensorrt/test/rank_two_test.py | 11 | 3285 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class RankTwoTest(trt_test.TfTrtIntegrationTestBase):
  """Test for rank 2 input in TF-TRT."""

  def GraphFn(self, x1, x2):
    """Build the test graph: two parallel arithmetic paths joined by an add.

    x1 is the rank-2 input, x2 the rank-4 one (see GetParams); the rank-2
    path is expanded to rank 4 before the two are combined.
    """
    # Two paths: first with rank 2 input, second with rank 4 input.
    outputs = []
    xs = [x1, x2]
    for i in range(2):
      x = xs[i]
      c = constant_op.constant(1.0, name="c%d_1" % i)
      q = math_ops.add(x, c, name="add%d_1" % i)
      q = math_ops.abs(q, name="abs%d_1" % i)
      c = constant_op.constant(2.2, name="c%d_2" % i)
      q = math_ops.add(q, c, name="add%d_2" % i)
      q = math_ops.abs(q, name="abs%d_2" % i)
      c = constant_op.constant(3.0, name="c%d_3" % i)
      q = math_ops.add(q, c, name="add%d_3" % i)
      if i == 0:
        # Expand the rank-2 path to rank 4, then break the TRT segment with
        # an incompatible op so the paths land in separate engines.
        axis = constant_op.constant(-1, dtype=dtypes.int32, name="axis")
        for j in range(2):
          q = array_ops.expand_dims(q, axis, name="expand%d_%d" % (i, j))
        q = self.trt_incompatible_op(q)
      q = gen_math_ops.reciprocal(q, name="reciprocal%d" % i)
      outputs.append(q)
    # Combine both paths
    q = math_ops.add(outputs[0], outputs[1], name="add")
    return array_ops.squeeze(q, name="output_0")

  def GetParams(self):
    # Input shapes: [12, 5] (rank 2) and [12, 5, 2, 2] (rank 4);
    # the single output is rank 4.
    return self.BuildParams(self.GraphFn, dtypes.float32,
                            [[12, 5], [12, 5, 2, 2]], [[12, 5, 2, 2]])

  def ExpectedEnginesToBuild(self, run_params):
    """Return the expected engines to build."""
    return {
        "TRTEngineOp_0": [
            "add0_1", "add0_2", "add0_3", "c0_1", "c0_2", "c0_3", "abs0_1",
            "abs0_2", "expand0_0", "expand0_1", "axis"
        ],
        "TRTEngineOp_1": [
            "add1_1", "add1_2", "add1_3", "c1_1", "c1_2", "c1_3", "abs1_1",
            "abs1_2", "reciprocal1"
        ],
        # The two ops can't be in the same cluster as the ops in TRTEngineOp_0
        # due to trt_incompatible_op. They can't be in the same cluster as the
        # ops in TRTEngineOP_1 because their batch size belongs to a different
        # equivalent class.
        "TRTEngineOp_2": ["add", "reciprocal0"]
    }
}
if __name__ == "__main__":
  # Allow running this test file directly.
  test.main()
| apache-2.0 |
wholebiome/aegea | setup.py | 1 | 2020 | #!/usr/bin/env python
import os, sys, glob, subprocess, textwrap
# Refuse to proceed with a setuptools too old to handle this setup() call.
try:
    import setuptools
    assert int(setuptools.__version__.split(".", 1)[0]) >= 19
except (ImportError, AssertionError):
    # On the ImportError path `setuptools` is unbound, so look it up safely;
    # the old code formatted setuptools.__version__ directly and raised
    # NameError instead of printing the intended message.
    _st_version = getattr(globals().get("setuptools"), "__version__", "not installed")
    msg = 'Error: Aegea failed to install because your version of setuptools is too old ({}; 19 is required). Run "make install_venv" to install aegea in its own virtualenv, or upgrade your pip and setuptools to their latest versions.' # noqa
    exit(textwrap.fill(msg.format(_st_version)))
try:
# Git version extraction logic designed to be compatible with both semver and PEP 440
version = subprocess.check_output(["git", "describe", "--tags", "--match", "v*.*.*"]).decode()
version = version.strip("v\n").replace("-", "+", 1).replace("-", ".")
except:
version = "0.0.0"
# Package metadata and dependencies.  `version` is computed above from git
# tags (or "0.0.0" when not building from a git checkout).
setuptools.setup(
    name="aegea",
    version=version,
    url="https://github.com/kislyuk/aegea",
    # First line of LICENSE.md is used as the license identifier.
    license=open("LICENSE.md").readline().strip(),
    author="Andrey Kislyuk",
    author_email="kislyuk@gmail.com",
    description="Amazon Web Services Operator Interface",
    long_description=open("README.rst").read(),
    install_requires=[
        "boto3 >= 1.4.7",
        "botocore >= 1.8.0",
        "argcomplete >= 1.8.2, < 2",
        "paramiko >= 2.1.1, < 3",
        "requests >= 2.12.4, < 3",
        "tweak >= 0.4.0, < 1",
        "keymaker >= 0.3.3, < 1",
        "pyyaml >= 3.11, < 4",
        "python-dateutil >= 2.1, <2.7.0",
        "babel >= 2.3.4, < 3",
        "ipwhois >= 0.13.0, < 1",
        "uritemplate >= 3.0.0, < 4",
        "awscli >= 1.2.9"
    ],
    extras_require={
        # Backports required only on Python 2.7.
        ':python_version == "2.7"': [
            "enum34 >= 1.1.6, < 2",
            "ipaddress >= 1.0.17, < 2",
            "subprocess32 >= 3.2.7, < 4"
        ]
    },
    tests_require=[
        "coverage",
        "flake8"
    ],
    packages=setuptools.find_packages(exclude=["test"]),
    scripts=glob.glob("scripts/*"),
    platforms=["MacOS X", "Posix"],
    test_suite="test",
    include_package_data=True
)
| apache-2.0 |
AlexanderKaluzhny/instanotifier | instanotifier/notification/tests/test_serializer.py | 1 | 1861 | from test_plus.test import TestCase
import instanotifier.parser.rss.test_utils as parser_utils
from instanotifier.notification.serializers import RssNotificationSerializer, _compute_entry_id_hash
class TestRssNotificationSerializer(TestCase):
    """Tests for RssNotificationSerializer: validation, summary sanitizing
    and internal-id computation."""

    def setUp(self):
        # Canned RSS feed entries (list of dicts) reused by every test.
        self.feed_items = parser_utils.get_test_rss_feed_items()

    def test_serializer_validates_the_feed_item(self):
        json_feed_item = self.feed_items[0]
        serializer = RssNotificationSerializer(data=json_feed_item)
        self.assertTrue(serializer.is_valid(), serializer.errors)

    # NOTE: if run in task_eager mode, the feed_item was not serialized by the timeawareserializer,
    # so the RssNotification form will not be valid.
    def test_summary_field_is_sanitized(self):
        invalid_values = ["<script> do_bad_stuff() </script>", '<a href="javascript: routine();">Click here for $100</a>']
        # Renamed from `expected`: that name was also used as the loop target
        # below, shadowing the list it was zipping over.
        expected_values = ["<script> do_bad_stuff() </script>", "<a>Click here for $100</a>"]
        for idx, (invalid_value, expected_summary) in enumerate(zip(invalid_values, expected_values)):
            json_feed_item = self.feed_items[idx]
            json_feed_item['summary'] = invalid_value
            serializer = RssNotificationSerializer(data=json_feed_item)
            self.assertTrue(serializer.is_valid(), serializer.errors)
            notification = serializer.save()
            self.assertEqual(
                notification.summary, expected_summary
            )

    def test_internal_id_is_evaluated(self):
        json_feed_item = self.feed_items[0]
        serializer = RssNotificationSerializer(data=json_feed_item)
        self.assertTrue(serializer.is_valid(), serializer.errors)
        notification = serializer.save()
        # `expected_hash` renamed from `hash`, which shadowed the builtin.
        expected_hash = _compute_entry_id_hash(json_feed_item['entry_id'])
        self.assertEqual(expected_hash, notification.internal_id)
| mit |
LingxiaoJIA/gem5 | src/arch/x86/isa/insts/simd64/integer/arithmetic/multiply_add.py | 91 | 2946 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop PMADDWD_MMX_MMX {
mmuli ufp3, mmx, mmxm, srcSize=2, destSize=4, ext = Signed + "| 0x10 | 0x20"
mmuli ufp4, mmx, mmxm, srcSize=2, destSize=4, ext = Signed + "| 0x10"
maddi mmx, ufp3, ufp4, size=4, ext=0
};
def macroop PMADDWD_MMX_M {
ldfp ufp1, seg, sib, "DISPLACEMENT", dataSize=8
mmuli ufp3, mmx, ufp1, srcSize=2, destSize=4, ext = Signed + "| 0x10 | 0x20"
mmuli ufp4, mmx, ufp1, srcSize=2, destSize=4, ext = Signed + "| 0x10"
maddi mmx, ufp3, ufp4, size=4, ext=0
};
def macroop PMADDWD_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, "DISPLACEMENT", dataSize=8
mmuli ufp3, mmx, ufp1, srcSize=2, destSize=4, ext = Signed + "| 0x10 | 0x20"
mmuli ufp4, mmx, ufp1, srcSize=2, destSize=4, ext = Signed + "| 0x10"
maddi mmx, ufp3, ufp4, size=4, ext=0
};
'''
| bsd-3-clause |
blacktear23/py-servicebus | servicebus/pika/frame.py | 1 | 7777 | """Frame objects that do the frame demarshaling and marshaling."""
import logging
import struct
from servicebus.pika import amqp_object
from servicebus.pika import exceptions
from servicebus.pika import spec
from servicebus.pika.compat import byte
LOGGER = logging.getLogger(__name__)
class Frame(amqp_object.AMQPObject):
    """Base class for all AMQP frame types.

    Holds the core frame attributes and provides the shared _marshal
    helper that child classes use to produce the binary wire format.
    """
    NAME = 'Frame'

    def __init__(self, frame_type, channel_number):
        """Create a new instance of a frame

        :param int frame_type: The frame type
        :param int channel_number: The channel number for the frame

        """
        self.frame_type = frame_type
        self.channel_number = channel_number

    def _marshal(self, pieces):
        """Create the full AMQP wire protocol frame data representation

        :rtype: bytes

        """
        body = b''.join(pieces)
        # Frame layout: type (1 byte), channel (2), payload size (4),
        # payload, then the frame-end octet.
        header = struct.pack('>BHI', self.frame_type, self.channel_number,
                             len(body))
        return header + body + byte(spec.FRAME_END)

    def marshal(self):
        """To be extended by child classes

        :raises NotImplementedError

        """
        raise NotImplementedError
class Method(Frame):
    """Base Method frame object mapping. AMQP method frames are mapped on top
    of this class for creating or accessing their data and attributes.
    """
    NAME = 'METHOD'
    def __init__(self, channel_number, method):
        """Create a new instance of a frame
        :param int channel_number: The channel number for the frame
        :param pika.Spec.Class.Method method: The AMQP Class.Method
        """
        Frame.__init__(self, spec.FRAME_METHOD, channel_number)
        self.method = method
    def marshal(self):
        """Return the AMQP binary encoded value of the frame
        :rtype: bytes
        """
        pieces = self.method.encode()
        # Prepend the 4-byte class/method index so peers can dispatch.
        pieces.insert(0, struct.pack('>I', self.method.INDEX))
        return self._marshal(pieces)
class Header(Frame):
    """Header frame object mapping. AMQP content header frames are mapped
    on top of this class for creating or accessing their data and attributes.
    """
    NAME = 'Header'
    def __init__(self, channel_number, body_size, props):
        """Create a new instance of a AMQP ContentHeader object
        :param int channel_number: The channel number for the frame
        :param int body_size: The number of bytes for the body
        :param pika.spec.BasicProperties props: Basic.Properties object
        """
        Frame.__init__(self, spec.FRAME_HEADER, channel_number)
        self.body_size = body_size
        self.properties = props
    def marshal(self):
        """Return the AMQP binary encoded value of the frame
        :rtype: bytes
        """
        pieces = self.properties.encode()
        # '>HxxQ': class id, two zero padding bytes (the "weight" field,
        # always 0), then the 8-byte body size.
        pieces.insert(0, struct.pack('>HxxQ', self.properties.INDEX,
                                     self.body_size))
        return self._marshal(pieces)
class Body(Frame):
    """Body frame object mapping class. AMQP content body frames are mapped on
    to this base class for getting/setting of attributes/data.
    """
    NAME = 'Body'
    def __init__(self, channel_number, fragment):
        """
        Parameters:
        - channel_number: int
        - fragment: unicode or str
        """
        Frame.__init__(self, spec.FRAME_BODY, channel_number)
        self.fragment = fragment
    def marshal(self):
        """Return the AMQP binary encoded value of the frame
        :rtype: bytes
        """
        # The fragment is the whole payload; _marshal adds header/end marker.
        return self._marshal([self.fragment])
class Heartbeat(Frame):
    """Heartbeat frame object mapping class. AMQP Heartbeat frames are mapped
    on to this class for a common access structure to the attributes/data
    values.
    """
    NAME = 'Heartbeat'
    def __init__(self):
        """Create a new instance of the Heartbeat frame"""
        # Heartbeats always travel on channel 0 and carry no payload.
        Frame.__init__(self, spec.FRAME_HEARTBEAT, 0)
    def marshal(self):
        """Return the AMQP binary encoded value of the frame
        :rtype: bytes
        """
        return self._marshal(list())
class ProtocolHeader(amqp_object.AMQPObject):
    """Pythonic interface for building the AMQP protocol header frame sent
    at connection start.
    """
    NAME = 'ProtocolHeader'

    def __init__(self, major=None, minor=None, revision=None):
        """Construct a Protocol Header frame object for the specified AMQP
        version

        :param int major: Major version number
        :param int minor: Minor version number
        :param int revision: Revision

        """
        self.frame_type = -1
        # A falsy value (None or 0) falls back to the library default,
        # matching the original `or` semantics.
        self.major = major if major else spec.PROTOCOL_VERSION[0]
        self.minor = minor if minor else spec.PROTOCOL_VERSION[1]
        self.revision = revision if revision else spec.PROTOCOL_VERSION[2]

    def marshal(self):
        """Return the full AMQP wire protocol frame data representation of the
        ProtocolHeader frame

        :rtype: bytes

        """
        version = struct.pack('BBBB', 0, self.major, self.minor,
                              self.revision)
        return b'AMQP' + version
def decode_frame(data_in):
    """Receives raw socket data and attempts to turn it into a frame.
    Returns bytes used to make the frame and the frame

    A return of ``(0, None)`` means the buffer does not yet contain a
    complete frame; the caller should read more data and retry.

    :param str data_in: The raw data stream
    :rtype: tuple(bytes consumed, frame)
    :raises: pika.exceptions.InvalidFrameError
    """
    # Look to see if it's a protocol header frame
    try:
        if data_in[0:4] == b'AMQP':
            major, minor, revision = struct.unpack_from('BBB', data_in, 5)
            return 8, ProtocolHeader(major, minor, revision)
    except (IndexError, struct.error):
        return 0, None
    # Get the Frame Type, Channel Number and Frame Size
    try:
        (frame_type, channel_number,
         frame_size) = struct.unpack('>BHL', data_in[0:7])
    except struct.error:
        return 0, None
    # Get the frame data
    # frame_size counts only the payload, so add the header and end sizes.
    frame_end = spec.FRAME_HEADER_SIZE + frame_size + spec.FRAME_END_SIZE
    # We don't have all of the frame yet
    if frame_end > len(data_in):
        return 0, None
    # The Frame termination chr is wrong
    if data_in[frame_end - 1:frame_end] != byte(spec.FRAME_END):
        raise exceptions.InvalidFrameError("Invalid FRAME_END marker")
    # Get the raw frame data
    frame_data = data_in[spec.FRAME_HEADER_SIZE:frame_end - 1]
    if frame_type == spec.FRAME_METHOD:
        # Get the Method ID from the frame data
        method_id = struct.unpack_from('>I', frame_data)[0]
        # Get a Method object for this method_id
        method = spec.methods[method_id]()
        # Decode the content
        method.decode(frame_data, 4)
        # Return the amount of data consumed and the Method object
        return frame_end, Method(channel_number, method)
    elif frame_type == spec.FRAME_HEADER:
        # Return the header class and body size
        class_id, weight, body_size = struct.unpack_from('>HHQ', frame_data)
        # Get the Properties type
        properties = spec.props[class_id]()
        # Decode the properties out
        properties.decode(frame_data[12:])
        # Return a Header frame
        return frame_end, Header(channel_number, body_size, properties)
    elif frame_type == spec.FRAME_BODY:
        # Return the amount of data consumed and the Body frame w/ data
        return frame_end, Body(channel_number, frame_data)
    elif frame_type == spec.FRAME_HEARTBEAT:
        # Return the amount of data and a Heartbeat frame
        return frame_end, Heartbeat()
    raise exceptions.InvalidFrameError("Unknown frame type: %i" % frame_type)
| bsd-3-clause |
Chilledheart/googlemock | scripts/generator/cpp/ast.py | 62 | 62426 | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate an Abstract Syntax Tree (AST) for C++."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
# TODO:
# * Tokens should never be exported, need to convert to Nodes
# (return types, parameters, etc.)
# * Handle static class data for templatized classes
# * Handle casts (both C++ and C-style)
# * Handle conditions and loops (if/else, switch, for, while/do)
#
# TODO much, much later:
# * Handle #define
# * exceptions
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
import sys
import traceback
from cpp import keywords
from cpp import tokenize
from cpp import utils
# Compatibility shims for very old Python versions.
if not hasattr(builtins, 'reversed'):
  # Support Python 2.3 and earlier.
  def reversed(seq):
    for i in range(len(seq)-1, -1, -1):
      yield seq[i]
if not hasattr(builtins, 'next'):
  # Support Python 2.5 and earlier.
  def next(obj):
    return obj.next()
VISIBILITY_PUBLIC, VISIBILITY_PROTECTED, VISIBILITY_PRIVATE = range(3)
FUNCTION_NONE = 0x00
FUNCTION_CONST = 0x01
FUNCTION_VIRTUAL = 0x02
FUNCTION_PURE_VIRTUAL = 0x04
FUNCTION_CTOR = 0x08
FUNCTION_DTOR = 0x10
FUNCTION_ATTRIBUTE = 0x20
FUNCTION_UNKNOWN_ANNOTATION = 0x40
FUNCTION_THROW = 0x80
FUNCTION_OVERRIDE = 0x100
"""
These are currently unused. Should really handle these properly at some point.
TYPE_MODIFIER_INLINE = 0x010000
TYPE_MODIFIER_EXTERN = 0x020000
TYPE_MODIFIER_STATIC = 0x040000
TYPE_MODIFIER_CONST = 0x080000
TYPE_MODIFIER_REGISTER = 0x100000
TYPE_MODIFIER_VOLATILE = 0x200000
TYPE_MODIFIER_MUTABLE = 0x400000
TYPE_MODIFIER_MAP = {
'inline': TYPE_MODIFIER_INLINE,
'extern': TYPE_MODIFIER_EXTERN,
'static': TYPE_MODIFIER_STATIC,
'const': TYPE_MODIFIER_CONST,
'register': TYPE_MODIFIER_REGISTER,
'volatile': TYPE_MODIFIER_VOLATILE,
'mutable': TYPE_MODIFIER_MUTABLE,
}
"""
_INTERNAL_TOKEN = 'internal'
_NAMESPACE_POP = 'ns-pop'
# TODO(nnorwitz): use this as a singleton for templated_types, etc
# where we don't want to create a new empty dict each time. It is also const.
class _NullDict(object):
__contains__ = lambda self: False
keys = values = items = iterkeys = itervalues = iteritems = lambda self: ()
# TODO(nnorwitz): move AST nodes into a separate module.
class Node(object):
  """Base AST node."""
  def __init__(self, start, end):
    # Offsets of this node in the original source text.
    self.start = start
    self.end = end
  def IsDeclaration(self):
    """Returns bool if this node is a declaration."""
    return False
  def IsDefinition(self):
    """Returns bool if this node is a definition."""
    return False
  def IsExportable(self):
    """Returns bool if this node exportable from a header file."""
    return False
  def Requires(self, node):
    """Does this AST node require the definition of the node passed in?"""
    return False
  def XXX__str__(self):
    # Disabled (renamed from __str__); kept around for debugging.
    return self._StringHelper(self.__class__.__name__, '')
  def _StringHelper(self, name, suffix):
    # Include source positions only in debug mode to keep reprs short.
    if not utils.DEBUG:
      return '%s(%s)' % (name, suffix)
    return '%s(%d, %d, %s)' % (name, self.start, self.end, suffix)
  def __repr__(self):
    return str(self)
class Define(Node):
  """A preprocessor #define with its name and definition text."""
  def __init__(self, start, end, name, definition):
    Node.__init__(self, start, end)
    self.name = name
    self.definition = definition
  def __str__(self):
    value = '%s %s' % (self.name, self.definition)
    return self._StringHelper(self.__class__.__name__, value)
class Include(Node):
  """An #include directive; `system` is True for <...> style includes."""
  def __init__(self, start, end, filename, system):
    Node.__init__(self, start, end)
    self.filename = filename
    self.system = system
  def __str__(self):
    fmt = '"%s"'
    if self.system:
      fmt = '<%s>'
    return self._StringHelper(self.__class__.__name__, fmt % self.filename)
class Goto(Node):
  """A goto statement referencing a label."""
  def __init__(self, start, end, label):
    Node.__init__(self, start, end)
    self.label = label
  def __str__(self):
    return self._StringHelper(self.__class__.__name__, str(self.label))
class Expr(Node):
  """A generic expression statement holding its token sequence."""
  def __init__(self, start, end, expr):
    Node.__init__(self, start, end)
    self.expr = expr
  def Requires(self, node):
    # TODO(nnorwitz): impl.
    return False
  def __str__(self):
    return self._StringHelper(self.__class__.__name__, str(self.expr))
class Return(Expr):
  """A return statement."""
  pass
class Delete(Expr):
  """A delete expression."""
  pass
class Friend(Expr):
  """A friend declaration, recorded with the namespace it appears in."""
  def __init__(self, start, end, expr, namespace):
    Expr.__init__(self, start, end, expr)
    self.namespace = namespace[:]
class Using(Node):
  """A using declaration/directive with the names it brings in."""
  def __init__(self, start, end, names):
    Node.__init__(self, start, end)
    self.names = names
  def __str__(self):
    return self._StringHelper(self.__class__.__name__, str(self.names))
class Parameter(Node):
  """A single function/method parameter: name, Type and default tokens."""
  def __init__(self, start, end, name, parameter_type, default):
    Node.__init__(self, start, end)
    self.name = name
    self.type = parameter_type
    self.default = default
  def Requires(self, node):
    # TODO(nnorwitz): handle namespaces, etc.
    return self.type.name == node.name
  def __str__(self):
    name = str(self.type)
    suffix = '%s %s' % (name, self.name)
    if self.default:
      suffix += ' = ' + ''.join([d.name for d in self.default])
    return self._StringHelper(self.__class__.__name__, suffix)
class _GenericDeclaration(Node):
  """Base for named declarations that belong to a namespace."""
  def __init__(self, start, end, name, namespace):
    Node.__init__(self, start, end)
    self.name = name
    # Copy: callers mutate their namespace stack while parsing.
    self.namespace = namespace[:]
  def FullName(self):
    """Returns the fully qualified, namespace-prefixed name."""
    prefix = ''
    if self.namespace and self.namespace[-1]:
      prefix = '::'.join(self.namespace) + '::'
    return prefix + self.name
  def _TypeStringHelper(self, suffix):
    # Like _StringHelper, but appends the enclosing namespace info.
    if self.namespace:
      names = [n or '<anonymous>' for n in self.namespace]
      suffix += ' in ' + '::'.join(names)
    return self._StringHelper(self.__class__.__name__, suffix)
# TODO(nnorwitz): merge with Parameter in some way?
class VariableDeclaration(_GenericDeclaration):
  """A variable declaration, optionally with an initial value."""
  def __init__(self, start, end, name, var_type, initial_value, namespace):
    _GenericDeclaration.__init__(self, start, end, name, namespace)
    self.type = var_type
    self.initial_value = initial_value
  def Requires(self, node):
    # TODO(nnorwitz): handle namespaces, etc.
    return self.type.name == node.name
  def ToString(self):
    """Return a string that tries to reconstitute the variable decl."""
    suffix = '%s %s' % (self.type, self.name)
    if self.initial_value:
      suffix += ' = ' + self.initial_value
    return suffix
  def __str__(self):
    return self._StringHelper(self.__class__.__name__, self.ToString())
class Typedef(_GenericDeclaration):
  """A typedef; `alias` holds the tokens of the aliased type."""
  def __init__(self, start, end, name, alias, namespace):
    _GenericDeclaration.__init__(self, start, end, name, namespace)
    self.alias = alias
  def IsDefinition(self):
    return True
  def IsExportable(self):
    return True
  def Requires(self, node):
    # TODO(nnorwitz): handle namespaces, etc.
    name = node.name
    for token in self.alias:
      if token is not None and name == token.name:
        return True
    return False
  def __str__(self):
    suffix = '%s, %s' % (self.name, self.alias)
    return self._TypeStringHelper(suffix)
class _NestedType(_GenericDeclaration):
  """Base for field-holding nested types (enums and unions)."""
  def __init__(self, start, end, name, fields, namespace):
    _GenericDeclaration.__init__(self, start, end, name, namespace)
    self.fields = fields
  def IsDefinition(self):
    return True
  def IsExportable(self):
    return True
  def __str__(self):
    suffix = '%s, {%s}' % (self.name, self.fields)
    return self._TypeStringHelper(suffix)
class Union(_NestedType):
  """A union definition."""
  pass
class Enum(_NestedType):
  """An enum definition."""
  pass
class Class(_GenericDeclaration):
  """A class declaration or definition (bases/body are None for a pure
  forward declaration)."""
  def __init__(self, start, end, name, bases, templated_types, body, namespace):
    _GenericDeclaration.__init__(self, start, end, name, namespace)
    self.bases = bases
    self.body = body
    self.templated_types = templated_types
  def IsDeclaration(self):
    return self.bases is None and self.body is None
  def IsDefinition(self):
    return not self.IsDeclaration()
  def IsExportable(self):
    return not self.IsDeclaration()
  def Requires(self, node):
    # TODO(nnorwitz): handle namespaces, etc.
    if self.bases:
      for token_list in self.bases:
        # TODO(nnorwitz): bases are tokens, do name comparision.
        for token in token_list:
          if token.name == node.name:
            return True
    # TODO(nnorwitz): search in body too.
    return False
  def __str__(self):
    name = self.name
    if self.templated_types:
      name += '<%s>' % self.templated_types
    suffix = '%s, %s, %s' % (name, self.bases, self.body)
    return self._TypeStringHelper(suffix)
class Struct(Class):
  """A struct; parsed identically to Class."""
  pass
class Function(_GenericDeclaration):
  """A free function declaration or definition."""
  def __init__(self, start, end, name, return_type, parameters,
               modifiers, templated_types, body, namespace):
    _GenericDeclaration.__init__(self, start, end, name, namespace)
    # Convert raw token sequences into Type/Parameter AST nodes up front.
    converter = TypeConverter(namespace)
    self.return_type = converter.CreateReturnType(return_type)
    self.parameters = converter.ToParameters(parameters)
    self.modifiers = modifiers
    self.body = body
    self.templated_types = templated_types
  def IsDeclaration(self):
    return self.body is None
  def IsDefinition(self):
    return self.body is not None
  def IsExportable(self):
    if self.return_type and 'static' in self.return_type.modifiers:
      return False
    # A None in the namespace stack marks an anonymous namespace.
    return None not in self.namespace
  def Requires(self, node):
    if self.parameters:
      # TODO(nnorwitz): parameters are tokens, do name comparision.
      for p in self.parameters:
        if p.name == node.name:
          return True
    # TODO(nnorwitz): search in body too.
    return False
  def __str__(self):
    # TODO(nnorwitz): add templated_types.
    suffix = ('%s %s(%s), 0x%02x, %s' %
              (self.return_type, self.name, self.parameters,
               self.modifiers, self.body))
    return self._TypeStringHelper(suffix)
class Method(Function):
  """A member function; `in_class` names the enclosing class."""
  def __init__(self, start, end, name, in_class, return_type, parameters,
               modifiers, templated_types, body, namespace):
    Function.__init__(self, start, end, name, return_type, parameters,
                      modifiers, templated_types, body, namespace)
    # TODO(nnorwitz): in_class could also be a namespace which can
    # mess up finding functions properly.
    self.in_class = in_class
class Type(_GenericDeclaration):
  """Type used for any variable (eg class, primitive, struct, etc)."""
  def __init__(self, start, end, name, templated_types, modifiers,
               reference, pointer, array):
    """
    Args:
      name: str name of main type
      templated_types: [Class (Type?)] template type info between <>
      modifiers: [str] type modifiers (keywords) eg, const, mutable, etc.
      reference, pointer, array: bools
    """
    _GenericDeclaration.__init__(self, start, end, name, [])
    self.templated_types = templated_types
    # A modifier-only declaration (e.g. bare "const"): treat the last
    # modifier token as the type name.
    if not name and modifiers:
      self.name = modifiers.pop()
    self.modifiers = modifiers
    self.reference = reference
    self.pointer = pointer
    self.array = array
  def __str__(self):
    prefix = ''
    if self.modifiers:
      prefix = ' '.join(self.modifiers) + ' '
    name = str(self.name)
    if self.templated_types:
      name += '<%s>' % self.templated_types
    suffix = prefix + name
    if self.reference:
      suffix += '&'
    if self.pointer:
      suffix += '*'
    if self.array:
      suffix += '[]'
    return self._TypeStringHelper(suffix)
  # By definition, Is* are always False. A Type can only exist in
  # some sort of variable declaration, parameter, or return value.
  def IsDeclaration(self):
    return False
  def IsDefinition(self):
    return False
  def IsExportable(self):
    return False
class TypeConverter(object):
  """Converts flat token sequences into Type/Parameter AST nodes."""
  def __init__(self, namespace_stack):
    self.namespace_stack = namespace_stack
  def _GetTemplateEnd(self, tokens, start):
    # Returns (tokens inside the <...> pair, index just past the closing '>').
    # `start` must point just past the opening '<'.
    count = 1
    end = start
    while 1:
      token = tokens[end]
      end += 1
      if token.name == '<':
        count += 1
      elif token.name == '>':
        count -= 1
        if count == 0:
          break
    return tokens[start:end-1], end
  def ToType(self, tokens):
    """Convert [Token,...] to [Class(...), ] useful for base classes.
    For example, code like class Foo : public Bar<x, y> { ... };
    the "Bar<x, y>" portion gets converted to an AST.
    Returns:
      [Class(...), ...]
    """
    result = []
    name_tokens = []
    reference = pointer = array = False
    def AddType(templated_types):
      # Partition tokens into name and modifier tokens.
      names = []
      modifiers = []
      for t in name_tokens:
        if keywords.IsKeyword(t.name):
          modifiers.append(t.name)
        else:
          names.append(t.name)
      name = ''.join(names)
      result.append(Type(name_tokens[0].start, name_tokens[-1].end,
                         name, templated_types, modifiers,
                         reference, pointer, array))
      del name_tokens[:]
    i = 0
    end = len(tokens)
    while i < end:
      token = tokens[i]
      if token.name == '<':
        new_tokens, new_end = self._GetTemplateEnd(tokens, i+1)
        AddType(self.ToType(new_tokens))
        # If there is a comma after the template, we need to consume
        # that here otherwise it becomes part of the name.
        i = new_end
        reference = pointer = array = False
      elif token.name == ',':
        AddType([])
        reference = pointer = array = False
      elif token.name == '*':
        pointer = True
      elif token.name == '&':
        reference = True
      elif token.name == '[':
        pointer = True
      elif token.name == ']':
        pass
      else:
        name_tokens.append(token)
      i += 1
    if name_tokens:
      # No '<' in the tokens, just a simple name and no template.
      AddType([])
    return result
  def DeclarationToParts(self, parts, needs_name_removed):
    # Splits a declaration token list into
    # (name, type_name, templated_types, modifiers, default, other_tokens).
    name = None
    default = []
    if needs_name_removed:
      # Handle default (initial) values properly.
      for i, t in enumerate(parts):
        if t.name == '=':
          default = parts[i+1:]
          name = parts[i-1].name
          if name == ']' and parts[i-2].name == '[':
            name = parts[i-3].name
            i -= 1
          parts = parts[:i-1]
          break
      else:
        if parts[-1].token_type == tokenize.NAME:
          name = parts.pop().name
        else:
          # TODO(nnorwitz): this is a hack that happens for code like
          # Register(Foo<T>); where it thinks this is a function call
          # but it's actually a declaration.
          name = '???'
    modifiers = []
    type_name = []
    other_tokens = []
    templated_types = []
    i = 0
    end = len(parts)
    while i < end:
      p = parts[i]
      if keywords.IsKeyword(p.name):
        modifiers.append(p.name)
      elif p.name == '<':
        templated_tokens, new_end = self._GetTemplateEnd(parts, i+1)
        templated_types = self.ToType(templated_tokens)
        i = new_end - 1
        # Don't add a spurious :: to data members being initialized.
        next_index = i + 1
        if next_index < end and parts[next_index].name == '::':
          i += 1
      elif p.name in ('[', ']', '='):
        # These are handled elsewhere.
        other_tokens.append(p)
      elif p.name not in ('*', '&', '>'):
        # Ensure that names have a space between them.
        if (type_name and type_name[-1].token_type == tokenize.NAME and
            p.token_type == tokenize.NAME):
          type_name.append(tokenize.Token(tokenize.SYNTAX, ' ', 0, 0))
        type_name.append(p)
      else:
        other_tokens.append(p)
      i += 1
    type_name = ''.join([t.name for t in type_name])
    return name, type_name, templated_types, modifiers, default, other_tokens
  def ToParameters(self, tokens):
    # Converts a comma-separated parameter-list token sequence into a list
    # of Parameter nodes.
    if not tokens:
      return []
    result = []
    name = type_name = ''
    type_modifiers = []
    pointer = reference = array = False
    first_token = None
    default = []
    def AddParameter():
      if default:
        del default[0]  # Remove flag.
      end = type_modifiers[-1].end
      parts = self.DeclarationToParts(type_modifiers, True)
      (name, type_name, templated_types, modifiers,
       unused_default, unused_other_tokens) = parts
      parameter_type = Type(first_token.start, first_token.end,
                            type_name, templated_types, modifiers,
                            reference, pointer, array)
      p = Parameter(first_token.start, end, name,
                    parameter_type, default)
      result.append(p)
    template_count = 0
    for s in tokens:
      if not first_token:
        first_token = s
      if s.name == '<':
        template_count += 1
      elif s.name == '>':
        template_count -= 1
      if template_count > 0:
        # Commas inside <...> separate template args, not parameters.
        type_modifiers.append(s)
        continue
      if s.name == ',':
        AddParameter()
        name = type_name = ''
        type_modifiers = []
        pointer = reference = array = False
        first_token = None
        default = []
      elif s.name == '*':
        pointer = True
      elif s.name == '&':
        reference = True
      elif s.name == '[':
        array = True
      elif s.name == ']':
        pass  # Just don't add to type_modifiers.
      elif s.name == '=':
        # Got a default value. Add any value (None) as a flag.
        default.append(None)
      elif default:
        default.append(s)
      else:
        type_modifiers.append(s)
    AddParameter()
    return result
  def CreateReturnType(self, return_type_seq):
    # Builds a Type node for a function return type, or None when absent.
    if not return_type_seq:
      return None
    start = return_type_seq[0].start
    end = return_type_seq[-1].end
    _, name, templated_types, modifiers, default, other_tokens = \
        self.DeclarationToParts(return_type_seq, False)
    names = [n.name for n in other_tokens]
    reference = '&' in names
    pointer = '*' in names
    array = '[' in names
    return Type(start, end, name, templated_types, modifiers,
                reference, pointer, array)
  def GetTemplateIndices(self, names):
    # names is a list of strings.
    start = names.index('<')
    end = len(names) - 1
    while end > 0:
      if names[end] == '>':
        break
      end -= 1
    return start, end+1
class AstBuilder(object):
def __init__(self, token_stream, filename, in_class='', visibility=None,
namespace_stack=[]):
self.tokens = token_stream
self.filename = filename
# TODO(nnorwitz): use a better data structure (deque) for the queue.
# Switching directions of the "queue" improved perf by about 25%.
# Using a deque should be even better since we access from both sides.
self.token_queue = []
self.namespace_stack = namespace_stack[:]
self.in_class = in_class
if in_class is None:
self.in_class_name_only = None
else:
self.in_class_name_only = in_class.split('::')[-1]
self.visibility = visibility
self.in_function = False
self.current_token = None
# Keep the state whether we are currently handling a typedef or not.
self._handling_typedef = False
self.converter = TypeConverter(self.namespace_stack)
def HandleError(self, msg, token):
printable_queue = list(reversed(self.token_queue[-20:]))
sys.stderr.write('Got %s in %s @ %s %s\n' %
(msg, self.filename, token, printable_queue))
def Generate(self):
while 1:
token = self._GetNextToken()
if not token:
break
# Get the next token.
self.current_token = token
# Dispatch on the next token type.
if token.token_type == _INTERNAL_TOKEN:
if token.name == _NAMESPACE_POP:
self.namespace_stack.pop()
continue
try:
result = self._GenerateOne(token)
if result is not None:
yield result
except:
self.HandleError('exception', token)
raise
def _CreateVariable(self, pos_token, name, type_name, type_modifiers,
ref_pointer_name_seq, templated_types, value=None):
reference = '&' in ref_pointer_name_seq
pointer = '*' in ref_pointer_name_seq
array = '[' in ref_pointer_name_seq
var_type = Type(pos_token.start, pos_token.end, type_name,
templated_types, type_modifiers,
reference, pointer, array)
return VariableDeclaration(pos_token.start, pos_token.end,
name, var_type, value, self.namespace_stack)
    def _GenerateOne(self, token):
        """Parse one top-level construct starting at token.

        Returns an AST node, or None when the construct produces nothing
        (e.g. preprocessor conditionals, unhandled syntax).
        """
        if token.token_type == tokenize.NAME:
            if (keywords.IsKeyword(token.name) and
                not keywords.IsBuiltinType(token.name)):
                # Language keyword (class/struct/typedef/...): dispatch to
                # the matching handle_* method.
                method = getattr(self, 'handle_' + token.name)
                return method()
            elif token.name == self.in_class_name_only:
                # The token name is the same as the class, must be a ctor if
                # there is a paren.  Otherwise, it's the return type.
                # Peek ahead to get the next token to figure out which.
                next = self._GetNextToken()
                self._AddBackToken(next)
                if next.token_type == tokenize.SYNTAX and next.name == '(':
                    return self._GetMethod([token], FUNCTION_CTOR, None, True)
                # Fall through--handle like any other method.

            # Handle data or function declaration/definition.
            syntax = tokenize.SYNTAX
            temp_tokens, last_token = \
                self._GetVarTokensUpTo(syntax, '(', ';', '{', '[')
            temp_tokens.insert(0, token)
            if last_token.name == '(':
                # If there is an assignment before the paren,
                # this is an expression, not a method.
                expr = bool([e for e in temp_tokens if e.name == '='])
                if expr:
                    new_temp = self._GetTokensUpTo(tokenize.SYNTAX, ';')
                    temp_tokens.append(last_token)
                    temp_tokens.extend(new_temp)
                    # Synthesize a ';' so the data-handling branch below runs.
                    last_token = tokenize.Token(tokenize.SYNTAX, ';', 0, 0)

            if last_token.name == '[':
                # Handle array, this isn't a method, unless it's an operator.
                # TODO(nnorwitz): keep the size somewhere.
                # unused_size = self._GetTokensUpTo(tokenize.SYNTAX, ']')
                temp_tokens.append(last_token)
                if temp_tokens[-2].name == 'operator':
                    temp_tokens.append(self._GetNextToken())
                else:
                    temp_tokens2, last_token = \
                        self._GetVarTokensUpTo(tokenize.SYNTAX, ';')
                    temp_tokens.extend(temp_tokens2)

            if last_token.name == ';':
                # Handle data, this isn't a method.
                parts = self.converter.DeclarationToParts(temp_tokens, True)
                (name, type_name, templated_types, modifiers, default,
                 unused_other_tokens) = parts

                t0 = temp_tokens[0]
                names = [t.name for t in temp_tokens]
                if templated_types:
                    # Drop the '<...>' span so only the plain names remain.
                    start, end = self.converter.GetTemplateIndices(names)
                    names = names[:start] + names[end:]
                default = ''.join([t.name for t in default])
                return self._CreateVariable(t0, name, type_name, modifiers,
                                            names, templated_types, default)
            if last_token.name == '{':
                # Push everything except the leading keyword back, then let
                # the keyword's handler re-parse the whole construct.
                self._AddBackTokens(temp_tokens[1:])
                self._AddBackToken(last_token)
                method_name = temp_tokens[0].name
                method = getattr(self, 'handle_' + method_name, None)
                if not method:
                    # Must be declaring a variable.
                    # TODO(nnorwitz): handle the declaration.
                    return None
                return method()
            return self._GetMethod(temp_tokens, 0, None, False)
        elif token.token_type == tokenize.SYNTAX:
            if token.name == '~' and self.in_class:
                # Must be a dtor (probably not in method body).
                token = self._GetNextToken()
                # self.in_class can contain A::Name, but the dtor will only
                # be Name.  Make sure to compare against the right value.
                if (token.token_type == tokenize.NAME and
                    token.name == self.in_class_name_only):
                    return self._GetMethod([token], FUNCTION_DTOR, None, True)
            # TODO(nnorwitz): handle a lot more syntax.
        elif token.token_type == tokenize.PREPROCESSOR:
            # TODO(nnorwitz): handle more preprocessor directives.
            # token starts with a #, so remove it and strip whitespace.
            name = token.name[1:].lstrip()
            if name.startswith('include'):
                # Remove "include".
                name = name[7:].strip()
                assert name
                # Handle #include \<newline> "header-on-second-line.h".
                if name.startswith('\\'):
                    name = name[1:].strip()
                assert name[0] in '<"', token
                assert name[-1] in '>"', token
                system = name[0] == '<'
                filename = name[1:-1]
                return Include(token.start, token.end, filename, system)
            if name.startswith('define'):
                # Remove "define".
                name = name[6:].strip()
                assert name
                value = ''
                # Split "NAME VALUE" at the first whitespace run.
                for i, c in enumerate(name):
                    if c.isspace():
                        value = name[i:].lstrip()
                        name = name[:i]
                        break
                return Define(token.start, token.end, name, value)
            if name.startswith('if') and name[2:3].isspace():
                condition = name[3:].strip()
                if condition.startswith('0') or condition.startswith('(0)'):
                    # Dead #if 0 block: skip to the matching #endif.
                    self._SkipIf0Blocks()
        return None
def _GetTokensUpTo(self, expected_token_type, expected_token):
return self._GetVarTokensUpTo(expected_token_type, expected_token)[0]
def _GetVarTokensUpTo(self, expected_token_type, *expected_tokens):
last_token = self._GetNextToken()
tokens = []
while (last_token.token_type != expected_token_type or
last_token.name not in expected_tokens):
tokens.append(last_token)
last_token = self._GetNextToken()
return tokens, last_token
# TODO(nnorwitz): remove _IgnoreUpTo() it shouldn't be necesary.
def _IgnoreUpTo(self, token_type, token):
unused_tokens = self._GetTokensUpTo(token_type, token)
def _SkipIf0Blocks(self):
count = 1
while 1:
token = self._GetNextToken()
if token.token_type != tokenize.PREPROCESSOR:
continue
name = token.name[1:].lstrip()
if name.startswith('endif'):
count -= 1
if count == 0:
break
elif name.startswith('if'):
count += 1
def _GetMatchingChar(self, open_paren, close_paren, GetNextToken=None):
if GetNextToken is None:
GetNextToken = self._GetNextToken
# Assumes the current token is open_paren and we will consume
# and return up to the close_paren.
count = 1
token = GetNextToken()
while 1:
if token.token_type == tokenize.SYNTAX:
if token.name == open_paren:
count += 1
elif token.name == close_paren:
count -= 1
if count == 0:
break
yield token
token = GetNextToken()
yield token
    def _GetParameters(self):
        # Generator over the tokens of a parameter list, including the ')'.
        return self._GetMatchingChar('(', ')')

    def GetScope(self):
        # Generator over the tokens of a brace-delimited scope, including '}'.
        return self._GetMatchingChar('{', '}')
def _GetNextToken(self):
if self.token_queue:
return self.token_queue.pop()
return next(self.tokens)
def _AddBackToken(self, token):
if token.whence == tokenize.WHENCE_STREAM:
token.whence = tokenize.WHENCE_QUEUE
self.token_queue.insert(0, token)
else:
assert token.whence == tokenize.WHENCE_QUEUE, token
self.token_queue.append(token)
    def _AddBackTokens(self, tokens):
        """Push a sequence of tokens back in order.

        The queue is consumed from its end, so tokens are stored reversed;
        stream-origin tokens are re-tagged as queued and placed at the far
        end so already-queued tokens are returned first.
        """
        if tokens:
            if tokens[-1].whence == tokenize.WHENCE_STREAM:
                for token in tokens:
                    token.whence = tokenize.WHENCE_QUEUE
                self.token_queue[:0] = reversed(tokens)
            else:
                assert tokens[-1].whence == tokenize.WHENCE_QUEUE, tokens
                self.token_queue.extend(reversed(tokens))
    def GetName(self, seq=None):
        """Parse a (possibly qualified/templated) name.

        Reads from seq if given, otherwise from the token stream.

        Returns ([tokens], next_token_info): the tokens making up the name
        and the first token that is not part of it.
        """
        GetNextToken = self._GetNextToken
        if seq is not None:
            it = iter(seq)
            GetNextToken = lambda: next(it)
        next_token = GetNextToken()
        tokens = []
        last_token_was_name = False
        while (next_token.token_type == tokenize.NAME or
               (next_token.token_type == tokenize.SYNTAX and
                next_token.name in ('::', '<'))):
            # Two NAMEs in a row means the identifier should terminate.
            # It's probably some sort of variable declaration.
            if last_token_was_name and next_token.token_type == tokenize.NAME:
                break
            last_token_was_name = next_token.token_type == tokenize.NAME
            tokens.append(next_token)
            # Handle templated names.
            if next_token.name == '<':
                # Swallow the whole '<...>' span, nesting included.
                tokens.extend(self._GetMatchingChar('<', '>', GetNextToken))
                last_token_was_name = True
            next_token = GetNextToken()
        return tokens, next_token
    def GetMethod(self, modifiers, templated_types):
        """Parse a method/function starting at its return type.

        Collects everything up to the opening '(' and delegates to
        _GetMethod.
        """
        return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(')
        assert len(return_type_and_name) >= 1
        return self._GetMethod(return_type_and_name, modifiers, templated_types,
                               False)
    def _GetMethod(self, return_type_and_name, modifiers, templated_types,
                   get_paren):
        """Parse the rest of a method/function declaration or definition.

        return_type_and_name holds the tokens already read (return type
        followed by the name); get_paren indicates the '(' has not been
        consumed yet.  Returns a Method, Function, or (for function-pointer
        data members) a VariableDeclaration node.
        """
        template_portion = None
        if get_paren:
            token = self._GetNextToken()
            assert token.token_type == tokenize.SYNTAX, token
            if token.name == '<':
                # Handle templatized dtors.
                template_portion = [token]
                template_portion.extend(self._GetMatchingChar('<', '>'))
                token = self._GetNextToken()
            assert token.token_type == tokenize.SYNTAX, token
            assert token.name == '(', token

        name = return_type_and_name.pop()
        # Handle templatized ctors.
        if name.name == '>':
            index = 1
            while return_type_and_name[index].name != '<':
                index += 1
            template_portion = return_type_and_name[index:] + [name]
            del return_type_and_name[index:]
            name = return_type_and_name.pop()
        elif name.name == ']':
            # operator[] was split across tokens; re-fuse it into one NAME.
            rt = return_type_and_name
            assert rt[-1].name == '[', return_type_and_name
            assert rt[-2].name == 'operator', return_type_and_name
            name_seq = return_type_and_name[-2:]
            del return_type_and_name[-2:]
            name = tokenize.Token(tokenize.NAME, 'operator[]',
                                  name_seq[0].start, name.end)
            # Get the open paren so _GetParameters() below works.
            unused_open_paren = self._GetNextToken()

        # TODO(nnorwitz): store template_portion.
        return_type = return_type_and_name
        # indices supplies start/end positions for the resulting node.
        indices = name
        if return_type:
            indices = return_type[0]

        # Force ctor for templatized ctors.
        if name.name == self.in_class and not modifiers:
            modifiers |= FUNCTION_CTOR
        parameters = list(self._GetParameters())
        del parameters[-1]              # Remove trailing ')'.

        # Handling operator() is especially weird.
        if name.name == 'operator' and not parameters:
            token = self._GetNextToken()
            assert token.name == '(', token
            parameters = list(self._GetParameters())
            del parameters[-1]          # Remove trailing ')'.

        # Consume trailing modifiers (const, throw(...), override, macros).
        token = self._GetNextToken()
        while token.token_type == tokenize.NAME:
            modifier_token = token
            token = self._GetNextToken()
            if modifier_token.name == 'const':
                modifiers |= FUNCTION_CONST
            elif modifier_token.name == '__attribute__':
                # TODO(nnorwitz): handle more __attribute__ details.
                modifiers |= FUNCTION_ATTRIBUTE
                assert token.name == '(', token
                # Consume everything between the (parens).
                unused_tokens = list(self._GetMatchingChar('(', ')'))
                token = self._GetNextToken()
            elif modifier_token.name == 'throw':
                modifiers |= FUNCTION_THROW
                assert token.name == '(', token
                # Consume everything between the (parens).
                unused_tokens = list(self._GetMatchingChar('(', ')'))
                token = self._GetNextToken()
            elif modifier_token.name == 'override':
                modifiers |= FUNCTION_OVERRIDE
            elif modifier_token.name == modifier_token.name.upper():
                # HACK(nnorwitz):  assume that all upper-case names
                # are some macro we aren't expanding.
                modifiers |= FUNCTION_UNKNOWN_ANNOTATION
            else:
                self.HandleError('unexpected token', modifier_token)

        assert token.token_type == tokenize.SYNTAX, token
        # Handle ctor initializers.
        if token.name == ':':
            # TODO(nnorwitz): anything else to handle for initializer list?
            while token.name != ';' and token.name != '{':
                token = self._GetNextToken()

        # Handle pointer to functions that are really data but look
        # like method declarations.
        if token.name == '(':
            if parameters[0].name == '*':
                # name contains the return type.
                name = parameters.pop()
                # parameters contains the name of the data.
                modifiers = [p.name for p in parameters]
                # Already at the ( to open the parameter list.
                function_parameters = list(self._GetMatchingChar('(', ')'))
                del function_parameters[-1]  # Remove trailing ')'.
                # TODO(nnorwitz): store the function_parameters.
                token = self._GetNextToken()
                assert token.token_type == tokenize.SYNTAX, token
                assert token.name == ';', token
                return self._CreateVariable(indices, name.name, indices.name,
                                            modifiers, '', None)
            # At this point, we got something like:
            #  return_type (type::*name_)(params);
            # This is a data member called name_ that is a function pointer.
            # With this code: void (sq_type::*field_)(string&);
            # We get: name=void return_type=[] parameters=sq_type ... field_
            # TODO(nnorwitz): is return_type always empty?
            # TODO(nnorwitz): this isn't even close to being correct.
            # Just put in something so we don't crash and can move on.
            real_name = parameters[-1]
            modifiers = [p.name for p in self._GetParameters()]
            del modifiers[-1]           # Remove trailing ')'.
            return self._CreateVariable(indices, real_name.name, indices.name,
                                        modifiers, '', None)

        if token.name == '{':
            body = list(self.GetScope())
            del body[-1]                # Remove trailing '}'.
        else:
            body = None
            if token.name == '=':
                # "= 0": pure virtual declaration.
                token = self._GetNextToken()
                assert token.token_type == tokenize.CONSTANT, token
                assert token.name == '0', token
                modifiers |= FUNCTION_PURE_VIRTUAL
                token = self._GetNextToken()

            if token.name == '[':
                # TODO(nnorwitz): store tokens and improve parsing.
                # template <typename T, size_t N> char (&ASH(T (&seq)[N]))[N];
                tokens = list(self._GetMatchingChar('[', ']'))
                token = self._GetNextToken()

            assert token.name == ';', (token, return_type_and_name, parameters)

        # Looks like we got a method, not a function.
        if len(return_type) > 2 and return_type[-1].name == '::':
            return_type, in_class = \
                self._GetReturnTypeAndClassName(return_type)
            return Method(indices.start, indices.end, name.name, in_class,
                          return_type, parameters, modifiers, templated_types,
                          body, self.namespace_stack)
        return Function(indices.start, indices.end, name.name, return_type,
                        parameters, modifiers, templated_types, body,
                        self.namespace_stack)
    def _GetReturnTypeAndClassName(self, token_seq):
        """Split "ReturnType Class::" tokens into (return_type, class_name).

        Returns (return_type_tokens, class_name_tokens).
        """
        # Splitting the return type from the class name in a method
        # can be tricky.  For example, Return::Type::Is::Hard::To::Find().
        # Where is the return type and where is the class name?
        # The heuristic used is to pull the last name as the class name.
        # This includes all the templated type info.
        # TODO(nnorwitz): if there is only One name like in the
        # example above, punt and assume the last bit is the class name.

        # Ignore a :: prefix, if exists so we can find the first real name.
        i = 0
        if token_seq[0].name == '::':
            i = 1
        # Ignore a :: suffix, if exists.
        end = len(token_seq) - 1
        if token_seq[end-1].name == '::':
            end -= 1

        # Make a copy of the sequence so we can append a sentinel
        # value. This is required for GetName will has to have some
        # terminating condition beyond the last name.
        seq_copy = token_seq[i:end]
        seq_copy.append(tokenize.Token(tokenize.SYNTAX, '', 0, 0))
        names = []
        # NOTE(review): seq_copy was already sliced starting at i, so
        # indexing it with the same i looks off by one whenever a '::'
        # prefix was skipped — confirm against upstream before changing.
        while i < end:
            # Iterate through the sequence parsing out each name.
            new_name, next = self.GetName(seq_copy[i:])
            assert new_name, 'Got empty new_name, next=%s' % next
            # We got a pointer or ref.  Add it to the name.
            if next and next.token_type == tokenize.SYNTAX:
                new_name.append(next)
            names.append(new_name)
            i += len(new_name)

        # Now that we have the names, it's time to undo what we did.

        # Remove the sentinel value.
        names[-1].pop()
        # Flatten the token sequence for the return type.
        return_type = [e for seq in names[:-1] for e in seq]
        # The class name is the last name.
        class_name = names[-1]
        return return_type, class_name
    # Handlers for built-in type keywords.  The keyword by itself carries
    # no declaration information, so each handler is a deliberate no-op.
    def handle_bool(self):
        pass

    def handle_char(self):
        pass

    def handle_int(self):
        pass

    def handle_long(self):
        pass

    def handle_short(self):
        pass

    def handle_double(self):
        pass

    def handle_float(self):
        pass

    def handle_void(self):
        pass

    def handle_wchar_t(self):
        pass

    def handle_unsigned(self):
        pass

    def handle_signed(self):
        pass
    def _GetNestedType(self, ctor):
        """Parse a union/enum style nested type.

        ctor is the node constructor (e.g. Union, Enum).  Handles forward
        declarations, typedef'd declarations, full definitions, and the
        anonymous-type-with-variable form.
        """
        name = None
        name_tokens, token = self.GetName()
        if name_tokens:
            name = ''.join([t.name for t in name_tokens])

        # Handle forward declarations.
        if token.token_type == tokenize.SYNTAX and token.name == ';':
            return ctor(token.start, token.end, name, None,
                        self.namespace_stack)

        if token.token_type == tokenize.NAME and self._handling_typedef:
            self._AddBackToken(token)
            return ctor(token.start, token.end, name, None,
                        self.namespace_stack)

        # Must be the type declaration.
        fields = list(self._GetMatchingChar('{', '}'))
        del fields[-1]                  # Remove trailing '}'.
        if token.token_type == tokenize.SYNTAX and token.name == '{':
            next = self._GetNextToken()
            new_type = ctor(token.start, token.end, name, fields,
                            self.namespace_stack)
            # A name means this is an anonymous type and the name
            # is the variable declaration.
            if next.token_type != tokenize.NAME:
                return new_type
            name = new_type
            token = next

        # Must be variable declaration using the type prefixed with keyword.
        assert token.token_type == tokenize.NAME, token
        return self._CreateVariable(token, token.name, name, [], '', None)
    def handle_struct(self):
        """Parse a struct declaration/definition/return-type usage."""
        # Special case the handling typedef/aliasing of structs here.
        # It would be a pain to handle in the class code.
        name_tokens, var_token = self.GetName()
        if name_tokens:
            next_token = self._GetNextToken()
            # "struct Name*"/"struct Name&" or "struct Name var;" forms.
            is_syntax = (var_token.token_type == tokenize.SYNTAX and
                         var_token.name[0] in '*&')
            is_variable = (var_token.token_type == tokenize.NAME and
                           next_token.name == ';')
            variable = var_token
            if is_syntax and not is_variable:
                variable = next_token
                temp = self._GetNextToken()
                if temp.token_type == tokenize.SYNTAX and temp.name == '(':
                    # Handle methods declared to return a struct.
                    t0 = name_tokens[0]
                    # Synthesize a 'struct' token positioned just before
                    # the name so _GetMethod sees the full return type.
                    struct = tokenize.Token(tokenize.NAME, 'struct',
                                            t0.start-7, t0.start-2)
                    type_and_name = [struct]
                    type_and_name.extend(name_tokens)
                    type_and_name.extend((var_token, next_token))
                    return self._GetMethod(type_and_name, 0, None, False)
                assert temp.name == ';', (temp, name_tokens, var_token)
            if is_syntax or (is_variable and not self._handling_typedef):
                modifiers = ['struct']
                type_name = ''.join([t.name for t in name_tokens])
                position = name_tokens[0]
                return self._CreateVariable(position, variable.name, type_name,
                                            modifiers, var_token.name, None)
            name_tokens.extend((var_token, next_token))
            self._AddBackTokens(name_tokens)
        else:
            self._AddBackToken(var_token)
        return self._GetClass(Struct, VISIBILITY_PUBLIC, None)
    def handle_union(self):
        # Unions parse like enums: name, optional body, optional variable.
        return self._GetNestedType(Union)

    def handle_enum(self):
        return self._GetNestedType(Enum)

    def handle_auto(self):
        # TODO(nnorwitz): warn about using auto?  Probably not since it
        # will be reclaimed and useful for C++0x.
        pass

    # Storage-class and qualifier keywords add nothing by themselves,
    # so their handlers are no-ops.
    def handle_register(self):
        pass

    def handle_const(self):
        pass

    def handle_inline(self):
        pass

    def handle_extern(self):
        pass

    def handle_static(self):
        pass
    def handle_virtual(self):
        """Parse a virtual method (including virtual dtors)."""
        # What follows must be a method.
        token = token2 = self._GetNextToken()
        if token.name == 'inline':
            # HACK(nnorwitz): handle inline dtors by ignoring 'inline'.
            token2 = self._GetNextToken()
        if token2.token_type == tokenize.SYNTAX and token2.name == '~':
            return self.GetMethod(FUNCTION_VIRTUAL + FUNCTION_DTOR, None)
        assert token.token_type == tokenize.NAME or token.name == '::', token
        return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(')  # )
        return_type_and_name.insert(0, token)
        if token2 is not token:
            # Re-insert the token consumed while checking for 'inline'.
            return_type_and_name.insert(1, token2)
        return self._GetMethod(return_type_and_name, FUNCTION_VIRTUAL,
                               None, False)
    def handle_volatile(self):
        pass

    def handle_mutable(self):
        pass

    # Access-specifier handlers update the visibility used for subsequent
    # members; they are only valid inside a class body.
    def handle_public(self):
        assert self.in_class
        self.visibility = VISIBILITY_PUBLIC

    def handle_protected(self):
        assert self.in_class
        self.visibility = VISIBILITY_PROTECTED

    def handle_private(self):
        assert self.in_class
        self.visibility = VISIBILITY_PRIVATE

    def handle_friend(self):
        # Capture everything up to ';' as the friend declaration.
        tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
        assert tokens
        t0 = tokens[0]
        return Friend(t0.start, t0.end, tokens, self.namespace_stack)
    # Cast keywords appear only inside expressions, which this parser does
    # not model, so they are no-ops.
    def handle_static_cast(self):
        pass

    def handle_const_cast(self):
        pass

    def handle_dynamic_cast(self):
        pass

    def handle_reinterpret_cast(self):
        pass

    def handle_new(self):
        pass

    def handle_delete(self):
        # Capture the full delete expression up to ';'.
        tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
        assert tokens
        return Delete(tokens[0].start, tokens[0].end, tokens)
    def handle_typedef(self):
        """Parse a typedef, returning a Typedef node."""
        token = self._GetNextToken()
        if (token.token_type == tokenize.NAME and
            keywords.IsKeyword(token.name)):
            # Token must be struct/enum/union/class.
            method = getattr(self, 'handle_' + token.name)
            # Flag typedef mode so nested type handlers don't consume the
            # trailing variable name.
            self._handling_typedef = True
            tokens = [method()]
            self._handling_typedef = False
        else:
            tokens = [token]

        # Get the remainder of the typedef up to the semi-colon.
        tokens.extend(self._GetTokensUpTo(tokenize.SYNTAX, ';'))

        # TODO(nnorwitz): clean all this up.
        assert tokens
        name = tokens.pop()
        # indices supplies the source position for the resulting node.
        indices = name
        if tokens:
            indices = tokens[0]
        if not indices:
            indices = token
        if name.name == ')':
            # HACK(nnorwitz): Handle pointers to functions "properly".
            if (len(tokens) >= 4 and
                tokens[1].name == '(' and tokens[2].name == '*'):
                tokens.append(name)
                name = tokens[3]
        elif name.name == ']':
            # HACK(nnorwitz): Handle arrays properly.
            if len(tokens) >= 2:
                tokens.append(name)
                name = tokens[1]
        new_type = tokens
        if tokens and isinstance(tokens[0], tokenize.Token):
            new_type = self.converter.ToType(tokens)[0]
        return Typedef(indices.start, indices.end, name.name,
                       new_type, self.namespace_stack)
    def handle_typeid(self):
        pass  # Not needed yet.

    def handle_typename(self):
        pass  # Not needed yet.

    def _GetTemplatedTypes(self):
        """Parse "<...>" template parameters.

        Returns a dict mapping parameter name -> (type_name, default);
        both tuple slots may be None when not present.
        """
        result = {}
        tokens = list(self._GetMatchingChar('<', '>'))
        len_tokens = len(tokens) - 1   # Ignore trailing '>'.
        i = 0
        while i < len_tokens:
            key = tokens[i].name
            i += 1
            # Skip 'typename'/'class' keywords and parameter separators.
            if keywords.IsKeyword(key) or key == ',':
                continue
            type_name = default = None
            if i < len_tokens:
                i += 1
                if tokens[i-1].name == '=':
                    # Parameter with a default value.
                    assert i < len_tokens, '%s %s' % (i, tokens)
                    default, unused_next_token = self.GetName(tokens[i:])
                    i += len(default)
                else:
                    if tokens[i-1].name != ',':
                        # We got something like: Type variable.
                        # Re-adjust the key (variable) and type_name (Type).
                        key = tokens[i-1].name
                        type_name = tokens[i-2]
            result[key] = (type_name, default)
        return result
    def handle_template(self):
        """Parse a template declaration (class, struct, friend, or method)."""
        token = self._GetNextToken()
        assert token.token_type == tokenize.SYNTAX, token
        assert token.name == '<', token
        templated_types = self._GetTemplatedTypes()
        # TODO(nnorwitz): for now, just ignore the template params.
        token = self._GetNextToken()
        if token.token_type == tokenize.NAME:
            if token.name == 'class':
                return self._GetClass(Class, VISIBILITY_PRIVATE, templated_types)
            elif token.name == 'struct':
                return self._GetClass(Struct, VISIBILITY_PUBLIC, templated_types)
            elif token.name == 'friend':
                return self.handle_friend()
        self._AddBackToken(token)
        # Not a type: peek ahead to decide between a method and a variable.
        tokens, last = self._GetVarTokensUpTo(tokenize.SYNTAX, '(', ';')
        tokens.append(last)
        self._AddBackTokens(tokens)
        if last.name == '(':
            return self.GetMethod(FUNCTION_NONE, templated_types)
        # Must be a variable definition.
        return None
    # Boolean literals and asm blocks need no AST representation here.
    def handle_true(self):
        pass  # Nothing to do.

    def handle_false(self):
        pass  # Nothing to do.

    def handle_asm(self):
        pass  # Not needed yet.

    def handle_class(self):
        # Class members default to private visibility.
        return self._GetClass(Class, VISIBILITY_PRIVATE, None)
    def _GetBases(self):
        """Parse a base-class list after ':'.

        Returns (bases, opening_brace_token); bases is a list of Type nodes.
        """
        # Get base classes.
        bases = []
        while 1:
            token = self._GetNextToken()
            assert token.token_type == tokenize.NAME, token
            # TODO(nnorwitz): store kind of inheritance...maybe.
            if token.name not in ('public', 'protected', 'private'):
                # If inheritance type is not specified, it is private.
                # Just put the token back so we can form a name.
                # TODO(nnorwitz): it would be good to warn about this.
                self._AddBackToken(token)
            else:
                # Check for virtual inheritance.
                token = self._GetNextToken()
                if token.name != 'virtual':
                    self._AddBackToken(token)
                else:
                    # TODO(nnorwitz): store that we got virtual for this base.
                    pass
            base, next_token = self.GetName()
            bases_ast = self.converter.ToType(base)
            assert len(bases_ast) == 1, bases_ast
            bases.append(bases_ast[0])
            assert next_token.token_type == tokenize.SYNTAX, next_token
            if next_token.name == '{':
                token = next_token
                break
            # Support multiple inheritance.
            assert next_token.name == ',', next_token
        return bases, token
    def _GetClass(self, class_type, visibility, templated_types):
        """Parse a class/struct after its keyword.

        class_type is the node constructor (Class or Struct); visibility is
        the default member visibility; templated_types holds any template
        parameters.  Returns a class node or, for "class Foo v;" style
        declarations, a VariableDeclaration.
        """
        class_name = None
        class_token = self._GetNextToken()
        if class_token.token_type != tokenize.NAME:
            assert class_token.token_type == tokenize.SYNTAX, class_token
            token = class_token
        else:
            # Skip any macro (e.g. storage class specifiers) after the
            # 'class' keyword.
            next_token = self._GetNextToken()
            if next_token.token_type == tokenize.NAME:
                self._AddBackToken(next_token)
            else:
                self._AddBackTokens([class_token, next_token])
            name_tokens, token = self.GetName()
            class_name = ''.join([t.name for t in name_tokens])
        bases = None
        if token.token_type == tokenize.SYNTAX:
            if token.name == ';':
                # Forward declaration.
                return class_type(class_token.start, class_token.end,
                                  class_name, None, templated_types, None,
                                  self.namespace_stack)
            if token.name in '*&':
                # Inline forward declaration.  Could be method or data.
                name_token = self._GetNextToken()
                next_token = self._GetNextToken()
                if next_token.name == ';':
                    # Handle data
                    modifiers = ['class']
                    return self._CreateVariable(class_token, name_token.name,
                                                class_name,
                                                modifiers, token.name, None)
                else:
                    # Assume this is a method.
                    tokens = (class_token, token, name_token, next_token)
                    self._AddBackTokens(tokens)
                    return self.GetMethod(FUNCTION_NONE, None)
            if token.name == ':':
                bases, token = self._GetBases()

        body = None
        if token.token_type == tokenize.SYNTAX and token.name == '{':
            assert token.token_type == tokenize.SYNTAX, token
            assert token.name == '{', token

            # Recursively parse the class body with a nested builder.
            ast = AstBuilder(self.GetScope(), self.filename, class_name,
                             visibility, self.namespace_stack)
            body = list(ast.Generate())

            if not self._handling_typedef:
                token = self._GetNextToken()
                if token.token_type != tokenize.NAME:
                    assert token.token_type == tokenize.SYNTAX, token
                    assert token.name == ';', token
                else:
                    # "class Foo {...} var;": the trailing NAME declares a
                    # variable of the just-defined (possibly anonymous) type.
                    new_class = class_type(class_token.start, class_token.end,
                                           class_name, bases, None,
                                           body, self.namespace_stack)

                    modifiers = []
                    return self._CreateVariable(class_token,
                                                token.name, new_class,
                                                modifiers, token.name, None)
        else:
            if not self._handling_typedef:
                self.HandleError('non-typedef token', token)
            self._AddBackToken(token)

        return class_type(class_token.start, class_token.end, class_name,
                          bases, templated_types, body, self.namespace_stack)
    def handle_namespace(self):
        """Parse a namespace; its contents are re-queued for normal parsing.

        Pushes the namespace name (None when anonymous) and arranges for an
        internal pop token so Generate() unwinds the stack at the right time.
        """
        token = self._GetNextToken()
        # Support anonymous namespaces.
        name = None
        if token.token_type == tokenize.NAME:
            name = token.name
            token = self._GetNextToken()
        self.namespace_stack.append(name)
        assert token.token_type == tokenize.SYNTAX, token
        # Create an internal token that denotes when the namespace is complete.
        internal_token = tokenize.Token(_INTERNAL_TOKEN, _NAMESPACE_POP,
                                        None, None)
        internal_token.whence = token.whence
        if token.name == '=':
            # TODO(nnorwitz): handle aliasing namespaces.
            name, next_token = self.GetName()
            assert next_token.name == ';', next_token
            self._AddBackToken(internal_token)
        else:
            assert token.name == '{', token
            tokens = list(self.GetScope())
            # Replace the trailing } with the internal namespace pop token.
            tokens[-1] = internal_token
            # Handle namespace with nothing in it.
            self._AddBackTokens(tokens)
        return None
    def handle_using(self):
        # Capture the whole using declaration/directive up to ';'.
        tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
        assert tokens
        return Using(tokens[0].start, tokens[0].end, tokens)

    def handle_explicit(self):
        assert self.in_class
        # Nothing much to do.
        # TODO(nnorwitz): maybe verify the method name == class name.
        # This must be a ctor.
        return self.GetMethod(FUNCTION_CTOR, None)

    def handle_this(self):
        pass  # Nothing to do.

    def handle_operator(self):
        # Pull off the next token(s?) and make that part of the method name.
        pass

    def handle_sizeof(self):
        pass

    def handle_case(self):
        pass

    def handle_switch(self):
        pass

    def handle_default(self):
        # Consume the ':' following 'default' in a switch statement.
        token = self._GetNextToken()
        assert token.token_type == tokenize.SYNTAX
        assert token.name == ':'
    # Statement keywords inside function bodies; most need no AST node.
    def handle_if(self):
        pass

    def handle_else(self):
        pass

    def handle_return(self):
        tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
        if not tokens:
            # "return;" with no expression.
            return Return(self.current_token.start, self.current_token.end, None)
        return Return(tokens[0].start, tokens[0].end, tokens)

    def handle_goto(self):
        tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
        assert len(tokens) == 1, str(tokens)
        return Goto(tokens[0].start, tokens[0].end, tokens[0].name)

    def handle_try(self):
        pass  # Not needed yet.

    def handle_catch(self):
        pass  # Not needed yet.

    def handle_throw(self):
        pass  # Not needed yet.

    def handle_while(self):
        pass

    def handle_do(self):
        pass

    def handle_for(self):
        pass

    def handle_break(self):
        self._IgnoreUpTo(tokenize.SYNTAX, ';')

    def handle_continue(self):
        self._IgnoreUpTo(tokenize.SYNTAX, ';')
def BuilderFromSource(source, filename):
    """Utility method that returns an AstBuilder from source code.

    Args:
      source: 'C++ source code'
      filename: 'file1'

    Returns:
      AstBuilder
    """
    # Tokenization is lazy; errors surface when the builder is iterated.
    return AstBuilder(tokenize.GetTokens(source), filename)
def PrintIndentifiers(filename, should_print):
    """Prints all identifiers for a C++ source file.

    (The misspelled name "Indentifiers" is kept for backward compatibility
    with existing callers.)

    Args:
      filename: 'file1'
      should_print: predicate with signature: bool Function(token)
    """
    source = utils.ReadFile(filename, False)
    if source is None:
        sys.stderr.write('Unable to find: %s\n' % filename)
        return

    #print('Processing %s' % actual_filename)
    builder = BuilderFromSource(source, filename)
    try:
        for node in builder.Generate():
            if should_print(node):
                print(node.name)
    except KeyboardInterrupt:
        return
    except Exception:
        # Best-effort: ignore parse errors so output for other files isn't
        # lost.  Narrowed from a bare 'except:' so SystemExit and other
        # BaseExceptions are no longer silently swallowed.
        pass
def PrintAllIndentifiers(filenames, should_print):
    """Prints all identifiers for each C++ source file in filenames.

    Args:
      filenames: ['file1', 'file2', ...]
      should_print: predicate with signature: bool Function(token)
    """
    for path in filenames:
        PrintIndentifiers(path, should_print)
def main(argv):
    """Parse each file named in argv[1:], printing ASTs when DEBUG is set."""
    for filename in argv[1:]:
        source = utils.ReadFile(filename)
        if source is None:
            continue

        print('Processing %s' % filename)
        builder = BuilderFromSource(source, filename)
        try:
            # filter(None, ...) drops constructs that produced no node.
            entire_ast = filter(None, builder.Generate())
        except KeyboardInterrupt:
            return
        except:
            # Already printed a warning, print the traceback and continue.
            traceback.print_exc()
        else:
            if utils.DEBUG:
                for ast in entire_ast:
                    print(ast)


if __name__ == '__main__':
    main(sys.argv)
| bsd-3-clause |
40223139/LEGOg7-39 | static/Brython3.1.1-20150328-091302/Lib/ui/widget.py | 706 | 1774 | import random
from browser import doc
def getMousePosition(e):
    """Return the mouse position for event e as {'x': ..., 'y': ...}."""
    if e is None:
        # NOTE(review): 'win' is not defined in this module — presumably
        # browser.window was intended; confirm before relying on this path.
        e = win.event
    if e.pageX or e.pageY:
        return {'x': e.pageX, 'y': e.pageY}
    if e.clientX or e.clientY:
        # Fall back to client coordinates plus the document scroll offsets.
        x = e.clientX + doc.body.scrollLeft + doc.documentElement.scrollLeft
        y = e.clientY + doc.body.scrollTop + doc.documentElement.scrollTop
        return {'x': x, 'y': y}
    return {'x': 0, 'y': 0}
class Widget:
    """Base wrapper around a DOM element with a generated or explicit id."""

    def __init__(self, element, type, id=None):
        self._element = element
        if id is not None:
            self._element.id = id
        else:
            # Generate a pseudo-unique id such as "button_12345".
            self._element.id = '%s_%s' % (type, int(100000*random.random()))

    def get_id(self):
        """Return the DOM id assigned to this widget's element."""
        return self._element.id

    def attach(self, element_id):
        """Append this DOM component to the DOM element with element_id."""
        # doc is the browser document object imported at module level.
        doc[element_id] <= self._element

    def show(self):
        # NOTE(review): this sets .display on the element itself rather
        # than element.style — confirm this is intended under Brython.
        self._element.display = 'block'

    def hide(self):
        self._element.display = 'none'
class DraggableWidget(Widget):
    """Widget that can be repositioned by dragging it with the mouse."""

    def __init__(self, element, type, id=None):
        Widget.__init__(self, element, type, id)

        def on_drag(evt):
            # Follow the pointer, preserving the original grab offset.
            self._element.style.top = '%spx' % (evt.clientY - self._deltaY)
            self._element.style.left = '%spx' % (evt.clientX - self._deltaX)

        def on_mouse_down(evt):
            # Switch to absolute positioning and remember the grab offset.
            self._element.style.position = 'absolute'
            self._deltaX = evt.clientX - self._element.offsetLeft
            self._deltaY = evt.clientY - self._element.offsetTop
            doc.bind('mousemove', on_drag)

        def on_mouse_up(evt):
            doc.unbind('mousemove')

        self._element.bind('mousedown', on_mouse_down)
        self._element.bind('mouseup', on_mouse_up)
| agpl-3.0 |
aurelijusb/arangodb | 3rdParty/V8-4.3.61/tools/release/git_recipes.py | 19 | 9087 | #!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import functools
import re
# Matches a full 40-hex-digit git SHA1.
SHA1_RE = re.compile('^[a-fA-F0-9]{40}$')
# Extracts the SVN revision number from a git-svn-id commit line.
ROLL_DEPS_GIT_SVN_ID_RE = re.compile('^git-svn-id: .*@([0-9]+) .*$')

# Regular expression that matches a single commit footer line.
COMMIT_FOOTER_ENTRY_RE = re.compile(r'([^:]+):\s+(.+)')

# Footer metadata key for commit position.
COMMIT_POSITION_FOOTER_KEY = 'Cr-Commit-Position'

# Regular expression to parse a commit position
COMMIT_POSITION_RE = re.compile(r'(.+)@\{#(\d+)\}')

# Key for the 'git-svn' ID metadata commit footer entry.
GIT_SVN_ID_FOOTER_KEY = 'git-svn-id'

# e.g., git-svn-id: https://v8.googlecode.com/svn/trunk@23117
#     ce2b1a6d-e550-0410-aec6-3dcde31c8c00
GIT_SVN_ID_RE = re.compile(r'[^@]+@(\d+)\s+(?:[a-zA-Z0-9\-]+)')
# Copied from bot_update.py.
def GetCommitMessageFooterMap(message):
  """Returns: (dict) A dictionary of commit message footer entries.

  Footers must form the final blank-line-separated paragraph of the
  message; one malformed line invalidates the whole footer block.
  """
  # Keep only the last paragraph of the message.
  block = []
  for raw_line in message.strip().splitlines():
    stripped = raw_line.strip()
    if not stripped:
      block = []
    else:
      block.append(stripped)

  # Parse each footer line; bail out entirely on the first invalid one.
  footers = {}
  for entry in block:
    match = COMMIT_FOOTER_ENTRY_RE.match(entry)
    if match is None:
      return {}
    footers[match.group(1)] = match.group(2).strip()
  return footers
class GitFailedException(Exception):
  """Raised when an underlying git command invocation fails."""
  pass
def Strip(f):
  """Decorator that strips whitespace from f's string result.

  A None result is passed through unchanged so callers can still tell
  "no output" apart from an empty string.
  """
  # Local import keeps this module's top-level imports untouched.
  from functools import wraps

  @wraps(f)  # preserve the wrapped function's name/docstring for debugging
  def new_f(*args, **kwargs):
    result = f(*args, **kwargs)
    if result is None:
      return result
    return result.strip()
  return new_f
def MakeArgs(l):
  """['-a', '', 'abc', ''] -> '-a abc'"""
  # Drop falsy entries (empty strings) and join the rest with spaces.
  return " ".join(arg for arg in l if arg)
def Quoted(s):
  """Wrap s in literal double quotes."""
  return '"{}"'.format(s)
class GitRecipesMixin(object):
  """Convenience wrappers around git (and "git cl") command lines.

  The consuming class must provide a ``Git(args_string, **kwargs)`` method
  that runs the given git command line and returns its output as a string.
  """

  def GitIsWorkdirClean(self, **kwargs):
    """Returns True if the work tree has no uncommitted changes."""
    return self.Git("status -s -uno", **kwargs).strip() == ""

  @Strip
  def GitBranch(self, **kwargs):
    """Returns the (stripped) output of "git branch"."""
    return self.Git("branch", **kwargs)

  def GitCreateBranch(self, name, remote="", **kwargs):
    """Creates branch |name|, optionally tracking |remote|."""
    assert name
    remote_args = ["--upstream", remote] if remote else []
    self.Git(MakeArgs(["new-branch", name] + remote_args), **kwargs)

  def GitDeleteBranch(self, name, **kwargs):
    """Force-deletes branch |name|."""
    assert name
    self.Git(MakeArgs(["branch -D", name]), **kwargs)

  def GitReset(self, name, **kwargs):
    """Hard-resets the work tree to revision |name|."""
    assert name
    self.Git(MakeArgs(["reset --hard", name]), **kwargs)

  def GitStash(self, **kwargs):
    """Stashes local modifications."""
    self.Git(MakeArgs(["stash"]), **kwargs)

  def GitRemotes(self, **kwargs):
    """Returns the remote branch names, one per entry, whitespace-stripped."""
    return map(str.strip,
               self.Git(MakeArgs(["branch -r"]), **kwargs).splitlines())

  def GitCheckout(self, name, **kwargs):
    """Force-checks-out revision |name|."""
    assert name
    self.Git(MakeArgs(["checkout -f", name]), **kwargs)

  def GitCheckoutFile(self, name, branch_or_hash, **kwargs):
    """Force-checks-out file |name| from |branch_or_hash|."""
    assert name
    assert branch_or_hash
    self.Git(MakeArgs(["checkout -f", branch_or_hash, "--", name]), **kwargs)

  def GitCheckoutFileSafe(self, name, branch_or_hash, **kwargs):
    """Like GitCheckoutFile, but returns False instead of raising when the
    file does not exist in |branch_or_hash|."""
    try:
      self.GitCheckoutFile(name, branch_or_hash, **kwargs)
    except GitFailedException:  # pragma: no cover
      # The file doesn't exist in that revision.
      return False
    return True

  def GitChangedFiles(self, git_hash, **kwargs):
    """Returns the files changed by commit |git_hash| ([] at branch roots)."""
    assert git_hash
    try:
      files = self.Git(MakeArgs(["diff --name-only",
                                 git_hash,
                                 "%s^" % git_hash]), **kwargs)
      return map(str.strip, files.splitlines())
    except GitFailedException:  # pragma: no cover
      # Git fails using "^" at branch roots.
      return []

  @Strip
  def GitCurrentBranch(self, **kwargs):
    """Returns the name of the currently checked-out branch."""
    for line in self.Git("status -s -b -uno", **kwargs).strip().splitlines():
      match = re.match(r"^## (.+)", line)
      if match: return match.group(1)
    # Fixed typo in the error message ("curent" -> "current").
    raise Exception("Couldn't find current branch.")  # pragma: no cover

  @Strip
  def GitLog(self, n=0, format="", grep="", git_hash="", parent_hash="",
             branch="", reverse=False, **kwargs):
    """Runs "git log" with the given filters and returns its output.

    |git_hash| selects a single commit; |parent_hash| selects the parent of
    the given commit. The two are mutually exclusive.
    """
    assert not (git_hash and parent_hash)
    args = ["log"]
    if n > 0:
      args.append("-%d" % n)
    if format:
      args.append("--format=%s" % format)
    if grep:
      args.append("--grep=\"%s\"" % grep.replace("\"", "\\\""))
    if reverse:
      args.append("--reverse")
    if git_hash:
      args.append(git_hash)
    if parent_hash:
      args.append("%s^" % parent_hash)
    args.append(branch)
    return self.Git(MakeArgs(args), **kwargs)

  def GitGetPatch(self, git_hash, **kwargs):
    """Returns the full patch (log -1 -p) of commit |git_hash|."""
    assert git_hash
    return self.Git(MakeArgs(["log", "-1", "-p", git_hash]), **kwargs)

  # TODO(machenbach): Unused? Remove.
  def GitAdd(self, name, **kwargs):
    """Stages file |name|."""
    assert name
    self.Git(MakeArgs(["add", Quoted(name)]), **kwargs)

  def GitApplyPatch(self, patch_file, reverse=False, **kwargs):
    """Applies |patch_file| to the index, optionally in reverse."""
    assert patch_file
    args = ["apply --index --reject"]
    if reverse:
      args.append("--reverse")
    args.append(Quoted(patch_file))
    self.Git(MakeArgs(args), **kwargs)

  def GitUpload(self, reviewer="", author="", force=False, cq=False,
                bypass_hooks=False, cc="", **kwargs):
    """Uploads the current branch for review via "git cl upload"."""
    args = ["cl upload --send-mail"]
    if author:
      args += ["--email", Quoted(author)]
    if reviewer:
      args += ["-r", Quoted(reviewer)]
    if force:
      args.append("-f")
    if cq:
      args.append("--use-commit-queue")
    if bypass_hooks:
      args.append("--bypass-hooks")
    if cc:
      args += ["--cc", Quoted(cc)]
    # TODO(machenbach): Check output in forced mode. Verify that all required
    # base files were uploaded, if not retry.
    self.Git(MakeArgs(args), pipe=False, **kwargs)

  def GitCommit(self, message="", file_name="", author=None, **kwargs):
    """Commits staged changes with |message| or a message from |file_name|."""
    assert message or file_name
    args = ["commit"]
    if file_name:
      args += ["-aF", Quoted(file_name)]
    if message:
      args += ["-am", Quoted(message)]
    if author:
      args += ["--author", "\"%s <%s>\"" % (author, author)]
    self.Git(MakeArgs(args), **kwargs)

  def GitPresubmit(self, **kwargs):
    """Runs "git cl presubmit" with the tree check skipped."""
    self.Git("cl presubmit", "PRESUBMIT_TREE_CHECK=\"skip\"", **kwargs)

  def GitCLLand(self, **kwargs):
    """Lands the current CL, retrying when git produces no output."""
    self.Git(
        "cl land -f --bypass-hooks", retry_on=lambda x: x is None, **kwargs)

  def GitDiff(self, loc1, loc2, **kwargs):
    """Returns the diff between revisions |loc1| and |loc2|."""
    return self.Git(MakeArgs(["diff", loc1, loc2]), **kwargs)

  def GitPull(self, **kwargs):
    """Runs "git pull"."""
    self.Git("pull", **kwargs)

  def GitFetchOrigin(self, **kwargs):
    """Runs "git fetch origin"."""
    self.Git("fetch origin", **kwargs)

  @Strip
  # Copied from bot_update.py and modified for svn-like numbers only.
  def GetCommitPositionNumber(self, git_hash, **kwargs):
    """Dumps the 'git' log for a specific revision and parses out the commit
    position number.

    If a commit position metadata key is found, its number will be returned.

    Otherwise, we will search for a 'git-svn' metadata entry. If one is found,
    its SVN revision value is returned.
    """
    git_log = self.GitLog(format='%B', n=1, git_hash=git_hash, **kwargs)
    footer_map = GetCommitMessageFooterMap(git_log)

    # Search for commit position metadata
    value = footer_map.get(COMMIT_POSITION_FOOTER_KEY)
    if value:
      match = COMMIT_POSITION_RE.match(value)
      if match:
        return match.group(2)

    # Extract the svn revision from 'git-svn' metadata
    value = footer_map.get(GIT_SVN_ID_FOOTER_KEY)
    if value:
      match = GIT_SVN_ID_RE.match(value)
      if match:
        return match.group(1)
    raise GitFailedException("Couldn't determine commit position for %s" %
                             git_hash)
| apache-2.0 |
mpasternak/michaldtz-fixes-518-522 | contrib/scene2d/scene2d/sprite.py | 29 | 10008 | #!/usr/bin/env python
'''
Model code for managing sprites
===============================
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from pyglet.gl import *
from scene2d.drawable import *
class SpriteLayer(object):
    '''Represents a group of sprites at the same z depth.
    '''
    def __init__(self, z=0, sprites=None):
        self.z = z
        # Default to a fresh list so layers never share a mutable default.
        self.sprites = [] if sprites is None else sprites

    def get(self, x, y):
        '''Return the list of sprites whose bounds contain the pixel
        position (x, y). Empty if none match.'''
        return [sprite for sprite in self.sprites if sprite.contains(x, y)]

    def get_in_region(self, x1, y1, x2, y2):
        '''Return Drawables that are within the pixel bounds specified by
        the bottom-left (x1, y1) and top-right (x2, y2) corners.
        '''
        hits = []
        for sprite in self.sprites:
            # Keep the sprite when its extent overlaps the region on both
            # axes (touching edges count as overlapping).
            x_overlap = (sprite._x <= x2 and
                         sprite._x + sprite.width >= x1)
            y_overlap = (sprite._y <= y2 and
                         sprite._y + sprite.height >= y1)
            if x_overlap and y_overlap:
                hits.append(sprite)
        return hits
class Sprite(Drawable):
    '''A sprite with some dimensions, image to draw and optional animation
    to run.

    Attributes:
        x, y -- position
        width, height -- sprite dimensions (may differ from image)
        image -- image for this sprite
        offset -- offset of image from position (default (0,0))
        animations -- a queue of SpriteAnimations to run
        properties -- arbitrary data in a dict
    '''
    def __init__(self, x, y, width, height, image, offset=(0,0),
            properties=None):
        super(Sprite, self).__init__()
        self._x, self._y = x, y
        self.width, self.height = width, height
        self.image = image
        self.offset = offset
        self.animations = []
        if properties is None:
            self.properties = {}
        else:
            self.properties = properties

        # pre-calculate the style to force creation of _style
        self.get_style()

    @classmethod
    def from_image(cls, x, y, image, offset=(0,0), properties=None):
        '''Set up the sprite from the image - sprite dimensions are the
        same as the image.'''
        return cls(x, y, image.width, image.height, image, offset,
            properties)

    def get_drawstyle(self):
        '''Use the image style as a basis and modify to move.
        '''
        style = self.image.get_style().copy()
        offx, offy = self.offset
        # XXX remove int() if we get sub-pixel drawing of textures
        style.x, style.y = int(self._x - offx), int(self._y - offy)
        return style

    def push_animation(self, animation):
        "Push a SpriteAnimation onto this sprite's animation queue."
        # Bug fix: this previously did "raise NotImplemented()", which fails
        # with a TypeError because NotImplemented is a singleton, not an
        # exception class. NotImplementedError is the correct signal.
        raise NotImplementedError()

    def cancel_animation(self):
        'Cancel the current animation being run.'
        raise NotImplementedError()

    def clear_animation(self):
        'Clear the animation queue.'
        raise NotImplementedError()

    def animate(self, dt):
        '''Animate this sprite to handle passing of dt time.

        If self.image has a .animate method it will be called.
        '''
        raise NotImplementedError()

    def contains(self, x, y):
        '''Return True if the point is inside the sprite.'''
        if x < self.x: return False
        if y < self.y: return False
        if x >= self.x + self.width: return False
        if y >= self.y + self.height: return False
        return True

    def overlaps(self, rect):
        '''Return True if this sprite overlaps the other rect.

        A rect is an object that has an origin .x, .y and size .width,
        .height.
        '''
        # we avoid using .right and .top properties here to speed things up
        if self.x > (rect.x + rect.width): return False
        if (self.x + self.width) < rect.x: return False
        if self.y > (rect.y + rect.height): return False
        if (self.y + self.height) < rect.y: return False
        return True

    # Position setters also update the cached draw style (when present) so
    # the sprite is redrawn at the new location.
    def get_x(self):
        return self._x
    def set_x(self, x):
        self._x = x
        if self._style is not None:
            # XXX remove int() if we get sub-pixel drawing of textures
            self._style.x = int(x - self.offset[0])
    x = property(get_x, set_x)

    def get_y(self):
        return self._y
    def set_y(self, y):
        self._y = y
        if self._style is not None:
            # XXX remove int() if we get sub-pixel drawing of textures
            self._style.y = int(y - self.offset[1])
    y = property(get_y, set_y)

    # r/w, in pixels, y extent
    def get_top(self): return self.y + self.height
    def set_top(self, y): self.y = y - self.height
    top = property(get_top, set_top)

    # r/w, in pixels, y extent
    def get_bottom(self): return self.y
    def set_bottom(self, y): self.y = y
    bottom = property(get_bottom, set_bottom)

    # r/w, in pixels, x extent
    def get_left(self): return self.x
    def set_left(self, x): self.x = x
    left = property(get_left, set_left)

    # r/w, in pixels, x extent
    def get_right(self): return self.x + self.width
    def set_right(self, x): self.x = x - self.width
    right = property(get_right, set_right)

    # r/w, in pixels, (x, y)
    def get_center(self):
        return (self.x + self.width/2, self.y + self.height/2)
    def set_center(self, center):
        x, y = center
        self.x = x - self.width/2
        self.y = y - self.height/2
    center = property(get_center, set_center)

    # r/w, in pixels, (x, y)
    def get_midtop(self):
        return (self.x + self.width/2, self.y + self.height)
    def set_midtop(self, midtop):
        x, y = midtop
        self.x = x - self.width/2
        self.y = y - self.height
    midtop = property(get_midtop, set_midtop)

    # r/w, in pixels, (x, y)
    def get_midbottom(self):
        return (self.x + self.width/2, self.y)
    def set_midbottom(self, midbottom):
        x, y = midbottom
        self.x = x - self.width/2
        self.y = y
    midbottom = property(get_midbottom, set_midbottom)

    # r/w, in pixels, (x, y)
    def get_midleft(self):
        return (self.x, self.y + self.height/2)
    def set_midleft(self, midleft):
        x, y = midleft
        self.x = x
        self.y = y - self.height/2
    midleft = property(get_midleft, set_midleft)

    # r/w, in pixels, (x, y)
    def get_midright(self):
        return (self.x + self.width, self.y + self.height/2)
    def set_midright(self, midright):
        x, y = midright
        self.x = x - self.width
        self.y = y - self.height/2
    midright = property(get_midright, set_midright)

    # r/w, in pixels, (x, y)
    def get_topleft(self):
        return (self.x, self.y + self.height)
    def set_topleft(self, pos):
        x, y = pos
        self.x = x
        self.y = y - self.height
    topleft = property(get_topleft, set_topleft)

    # r/w, in pixels, (x, y)
    def get_topright(self):
        return (self.x + self.width, self.y + self.height)
    def set_topright(self, pos):
        x, y = pos
        self.x = x - self.width
        self.y = y - self.height
    topright = property(get_topright, set_topright)

    # r/w, in pixels, (x, y)
    def get_bottomright(self):
        return (self.x + self.width, self.y)
    def set_bottomright(self, pos):
        x, y = pos
        self.x = x - self.width
        self.y = y
    bottomright = property(get_bottomright, set_bottomright)

    # r/w, in pixels, (x, y)
    def get_bottomleft(self):
        return (self.x, self.y)
    def set_bottomleft(self, pos):
        self.x, self.y = pos
    bottomleft = property(get_bottomleft, set_bottomleft)
class RotatableSprite(Sprite):
    '''A sprite that may be rotated.

    Additional attributes:
        angle -- angle of rotation in degrees
    '''
    def __init__(self, x, y, width, height, image, angle=0,
            offset=(0,0), properties=None):
        self._angle = angle
        super(RotatableSprite, self).__init__(x, y, width, height, image,
            offset, properties)

    def get_angle(self):
        return self._angle
    def set_angle(self, angle):
        self._angle = angle
        # Consistency fix: guard against a missing cached style, mirroring
        # Sprite.set_x/set_y which check "_style is not None" before writing.
        if self._style is not None:
            self._style.angle = angle
    angle = property(get_angle, set_angle)

    @classmethod
    def from_image(cls, x, y, image, angle=0, offset=(0,0),
            properties=None):
        '''Set up the sprite from the image - sprite dimensions are the
        same as the image.'''
        return cls(x, y, image.width, image.height, image, angle,
            offset, properties)

    def get_drawstyle(self):
        '''Use the image style as a basis and add position and rotation.'''
        style = self.image.get_style().copy()
        style.x, style.y = self._x, self._y
        style.angle = self._angle
        return style
"""
class SpriteAnimation:
def animate(self, sprite, dt):
''' run this animation to handle passing of dt time. alters sprite
position and optionally image'''
raise NotImplemented()
class JumpAnimation(SpriteAnimation):
velocity = (vx, vy) # in pixels / second?
gravity = # in pixels / second?
ground = # height of ground
map = # tilemap with the ground / walls in it
image = # optional ImageAnimation to run
class PathAnimation(SpriteAnimation):
''' Will animate smoothly from one position to another, optionallyo to
another, optionally accelerating, etc. '''
points = [(x, y)] # points to move to in order
speed = # initial speed in direction of first point
velocity = # initial velocity if not in ^^^
turn_speed = # optional speed to slow to for turning
acceleration = # needed if turn_speed != None
max_speed = # needed if acceleration != None
"""
| bsd-3-clause |
louietsai/python-for-android | python3-alpha/python3-src/Lib/socket.py | 46 | 13835 | # Wrapper module for _socket, providing some additional facilities
# implemented in Python.
"""\
This module provides socket operations and some related functions.
On Unix, it supports IP (Internet Protocol) and Unix domain sockets.
On other systems, it only supports IP. Functions specific for a
socket are available as methods of the socket object.
Functions:
socket() -- create a new socket object
socketpair() -- create a pair of new socket objects [*]
fromfd() -- create a socket object from an open file descriptor [*]
gethostname() -- return the current hostname
gethostbyname() -- map a hostname to its IP number
gethostbyaddr() -- map an IP number or hostname to DNS info
getservbyname() -- map a service name and a protocol name to a port number
getprotobyname() -- map a protocol name (e.g. 'tcp') to a number
ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
htons(), htonl() -- convert 16, 32 bit int from host to network byte order
inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
socket.getdefaulttimeout() -- get the default timeout value
socket.setdefaulttimeout() -- set the default timeout value
create_connection() -- connects to an address, with an optional timeout and
optional source address.
[*] not available on all platforms!
Special objects:
SocketType -- type object for socket objects
error -- exception raised for I/O errors
has_ipv6 -- boolean value indicating if IPv6 is supported
Integer constants:
AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
Many other constants may be defined; these may be used in calls to
the setsockopt() and getsockopt() methods.
"""
import _socket
from _socket import *
import os, sys, io
try:
import errno
except ImportError:
errno = None
EBADF = getattr(errno, 'EBADF', 9)
EINTR = getattr(errno, 'EINTR', 4)
EAGAIN = getattr(errno, 'EAGAIN', 11)
EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', 11)
__all__ = ["getfqdn", "create_connection"]
__all__.extend(os._get_exports_list(_socket))
_realsocket = socket
# WSA error codes: map Windows Socket API error numbers to human-readable
# messages. Only defined (and exported) when running on Windows.
if sys.platform.lower().startswith("win"):
    errorTab = {
        10004: "The operation was interrupted.",
        10009: "A bad file handle was passed.",
        10013: "Permission denied.",
        10014: "A fault occurred on the network??",  # WSAEFAULT
        10022: "An invalid operation was attempted.",
        10035: "The socket operation would block",
        10036: "A blocking operation is already in progress.",
        10048: "The network address is in use.",
        10054: "The connection has been reset.",
        10058: "The network has been shut down.",
        10060: "The operation timed out.",
        10061: "Connection refused.",
        10063: "The name is too long.",
        10064: "The host is down.",
        10065: "The host is unreachable.",
    }
    __all__.append("errorTab")
class socket(_socket.socket):

    """A subclass of _socket.socket adding the makefile() method."""

    # _io_refs counts the SocketIO objects handed out by makefile();
    # _closed records that close() was requested -- the real close is
    # deferred until all SocketIO references are gone.
    __slots__ = ["__weakref__", "_io_refs", "_closed"]

    def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None):
        _socket.socket.__init__(self, family, type, proto, fileno)
        self._io_refs = 0
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, *args):
        # Context-manager exit: close unless already closed.
        if not self._closed:
            self.close()

    def __repr__(self):
        """Wrap __repr__() to reveal the real class name."""
        s = _socket.socket.__repr__(self)
        if s.startswith("<socket object"):
            s = "<%s.%s%s%s" % (self.__class__.__module__,
                                self.__class__.__name__,
                                getattr(self, '_closed', False) and " [closed] " or "",
                                s[7:])
        return s

    def dup(self):
        """dup() -> socket object

        Return a new socket object connected to the same system resource.
        The timeout of the original socket is copied to the duplicate.
        """
        fd = dup(self.fileno())
        sock = self.__class__(self.family, self.type, self.proto, fileno=fd)
        sock.settimeout(self.gettimeout())
        return sock

    def accept(self):
        """accept() -> (socket object, address info)

        Wait for an incoming connection.  Return a new socket
        representing the connection, and the address of the client.
        For IP sockets, the address info is a pair (hostaddr, port).
        """
        fd, addr = self._accept()
        sock = socket(self.family, self.type, self.proto, fileno=fd)
        # Issue #7995: if no default timeout is set and the listening
        # socket had a (non-zero) timeout, force the new socket in blocking
        # mode to override platform-specific socket flags inheritance.
        if getdefaulttimeout() is None and self.gettimeout():
            sock.setblocking(True)
        return sock, addr

    def makefile(self, mode="r", buffering=None, *,
                 encoding=None, errors=None, newline=None):
        """makefile(...) -> an I/O stream connected to the socket

        The arguments are as for io.open() after the filename,
        except the only mode characters supported are 'r', 'w' and 'b'.
        The semantics are similar too.  (XXX refactor to share code?)
        """
        for c in mode:
            if c not in {"r", "w", "b"}:
                # Bug fix: the message previously omitted "% mode", so the
                # %r placeholder was never interpolated.
                raise ValueError("invalid mode %r (only r, w, b allowed)"
                                 % mode)
        writing = "w" in mode
        # Plain "b" (or "") implies reading, matching io.open's 'r' default.
        reading = "r" in mode or not writing
        assert reading or writing
        binary = "b" in mode
        rawmode = ""
        if reading:
            rawmode += "r"
        if writing:
            rawmode += "w"
        raw = SocketIO(self, rawmode)
        # Track the handed-out stream so close() can be deferred until all
        # makefile() streams are themselves closed.
        self._io_refs += 1
        if buffering is None:
            buffering = -1
        if buffering < 0:
            buffering = io.DEFAULT_BUFFER_SIZE
        if buffering == 0:
            if not binary:
                raise ValueError("unbuffered streams must be binary")
            return raw
        if reading and writing:
            buffer = io.BufferedRWPair(raw, raw, buffering)
        elif reading:
            buffer = io.BufferedReader(raw, buffering)
        else:
            assert writing
            buffer = io.BufferedWriter(raw, buffering)
        if binary:
            return buffer
        text = io.TextIOWrapper(buffer, encoding, errors, newline)
        text.mode = mode
        return text

    def _decref_socketios(self):
        # Called by SocketIO.close(); performs the deferred close once the
        # last makefile() stream is gone and close() was already requested.
        if self._io_refs > 0:
            self._io_refs -= 1
        if self._closed:
            self.close()

    def _real_close(self, _ss=_socket.socket):
        # This function should not reference any globals. See issue #808164.
        _ss.close(self)

    def close(self):
        # This function should not reference any globals. See issue #808164.
        self._closed = True
        if self._io_refs <= 0:
            self._real_close()
def fromfd(fd, family, type, proto=0):
    """ fromfd(fd, family, type[, proto]) -> socket object

    Duplicate the given file descriptor and build a socket object on the
    copy.  The remaining arguments are the same as for socket().
    """
    return socket(family, type, proto, dup(fd))
# Only expose socketpair() when the C layer provides it (POSIX platforms).
if hasattr(_socket, "socketpair"):

    def socketpair(family=None, type=SOCK_STREAM, proto=0):
        """socketpair([family[, type[, proto]]]) -> (socket object, socket object)

        Create a pair of connected socket objects wrapping the raw sockets
        returned by the platform socketpair() function.

        The arguments are the same as for socket() except the default family
        is AF_UNIX if defined on the platform; otherwise, the default is
        AF_INET.
        """
        if family is None:
            # AF_UNIX is only present on platforms that support it.
            family = getattr(_socket, "AF_UNIX", AF_INET)
        raw_a, raw_b = _socket.socketpair(family, type, proto)
        wrapped_a = socket(family, type, proto, raw_a.detach())
        wrapped_b = socket(family, type, proto, raw_b.detach())
        return wrapped_a, wrapped_b
# Errnos raised by send()/recv() on a non-blocking socket when no data can
# be transferred right now; SocketIO maps these to a None return value.
_blocking_errnos = { EAGAIN, EWOULDBLOCK }
class SocketIO(io.RawIOBase):

    """Raw I/O implementation for stream sockets.

    This class supports the makefile() method on sockets.  It provides
    the raw I/O interface on top of a socket object.
    """

    # One might wonder why not let FileIO do the job instead.  There are two
    # main reasons why FileIO is not adapted:
    # - it wouldn't work under Windows (where you can't used read() and
    #   write() on a socket handle)
    # - it wouldn't work with socket timeouts (FileIO would ignore the
    #   timeout and consider the socket non-blocking)

    # XXX More docs

    def __init__(self, sock, mode):
        if mode not in ("r", "w", "rw", "rb", "wb", "rwb"):
            raise ValueError("invalid mode: %r" % mode)
        io.RawIOBase.__init__(self)
        self._sock = sock
        # Raw socket I/O is always binary; normalize the reported mode.
        if "b" not in mode:
            mode += "b"
        self._mode = mode
        self._reading = "r" in mode
        self._writing = "w" in mode
        # Set once a recv times out; further reads are refused because the
        # amount of data already received is then unknown.
        self._timeout_occurred = False

    def readinto(self, b):
        """Read up to len(b) bytes into the writable buffer *b* and return
        the number of bytes read.  If the socket is non-blocking and no bytes
        are available, None is returned.

        If *b* is non-empty, a 0 return value indicates that the connection
        was shutdown at the other end.
        """
        self._checkClosed()
        self._checkReadable()
        if self._timeout_occurred:
            raise IOError("cannot read from timed out object")
        while True:
            try:
                return self._sock.recv_into(b)
            except timeout:
                self._timeout_occurred = True
                raise
            except error as e:
                n = e.args[0]
                # Retry reads interrupted by a signal (EINTR).
                if n == EINTR:
                    continue
                # Non-blocking socket with nothing to read -> None.
                if n in _blocking_errnos:
                    return None
                raise

    def write(self, b):
        """Write the given bytes or bytearray object *b* to the socket
        and return the number of bytes written.  This can be less than
        len(b) if not all data could be written.  If the socket is
        non-blocking and no bytes could be written None is returned.
        """
        self._checkClosed()
        self._checkWritable()
        try:
            return self._sock.send(b)
        except error as e:
            # XXX what about EINTR?
            if e.args[0] in _blocking_errnos:
                return None
            raise

    def readable(self):
        """True if the SocketIO is open for reading.
        """
        return self._reading and not self.closed

    def writable(self):
        """True if the SocketIO is open for writing.
        """
        return self._writing and not self.closed

    def fileno(self):
        """Return the file descriptor of the underlying socket.
        """
        self._checkClosed()
        return self._sock.fileno()

    @property
    def name(self):
        # File-like "name": the socket's fd, or -1 once closed (fileno()
        # would raise on a closed object).
        if not self.closed:
            return self.fileno()
        else:
            return -1

    @property
    def mode(self):
        return self._mode

    def close(self):
        """Close the SocketIO object.  This doesn't close the underlying
        socket, except if all references to it have disappeared.
        """
        if self.closed:
            return
        io.RawIOBase.close(self)
        # Tell the owning socket one makefile() stream is gone; it may then
        # perform its deferred close.
        self._sock._decref_socketios()
        self._sock = None
def getfqdn(name=''):
    """Get fully qualified domain name from name.

    An empty argument is interpreted as meaning the local host.

    The hostname returned by gethostbyaddr() is checked first, then any
    aliases, preferring the first name containing a dot.  If no FQDN can be
    determined, the (stripped) input name is returned unchanged.
    """
    name = name.strip()
    if name in ('', '0.0.0.0'):
        name = gethostname()
    try:
        hostname, aliases, ipaddrs = gethostbyaddr(name)
    except error:
        # Resolution failed: fall back to the caller-supplied name.
        return name
    # Prefer the first dotted name among the primary hostname and aliases.
    for candidate in [hostname] + aliases:
        if '.' in candidate:
            return candidate
    return hostname
# Unique sentinel meaning "use the module-wide default timeout".
_GLOBAL_DEFAULT_TIMEOUT = object()


def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
                      source_address=None):
    """Connect to *address* and return the socket object.

    Convenience function.  Connect to *address* (a 2-tuple ``(host,
    port)``) and return the socket object.  Passing the optional
    *timeout* parameter will set the timeout on the socket instance
    before attempting to connect.  If no *timeout* is supplied, the
    global default timeout setting returned by :func:`getdefaulttimeout`
    is used.  If *source_address* is set it must be a tuple of (host, port)
    for the socket to bind as a source address before making the connection.
    An host of '' or port 0 tells the OS to use the default.
    """
    host, port = address
    last_error = None
    # Try every address getaddrinfo resolves until one connects.
    for af, socktype, proto, canonname, sa in getaddrinfo(
            host, port, 0, SOCK_STREAM):
        sock = None
        try:
            sock = socket(af, socktype, proto)
            if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sa)
            return sock
        except error as exc:
            # Remember the failure and clean up before trying the next
            # candidate address.
            last_error = exc
            if sock is not None:
                sock.close()
    if last_error is not None:
        raise last_error
    raise error("getaddrinfo returns an empty list")
| apache-2.0 |
edxzw/edx-platform | lms/djangoapps/courseware/access.py | 11 | 28955 | """
This file contains (or should), all access control logic for the courseware.
Ideally, it will be the only place that needs to know about any special settings
like DISABLE_START_DATES.
Note: The access control logic in this file does NOT check for enrollment in
a course. It is expected that higher layers check for enrollment so we
don't have to hit the enrollments table on every module load.
If enrollment is to be checked, use get_course_with_access in courseware.courses.
It is a wrapper around has_access that additionally checks for enrollment.
"""
from datetime import datetime
import logging
import pytz
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.utils.timezone import UTC
from opaque_keys.edx.keys import CourseKey, UsageKey
from xblock.core import XBlock
from xmodule.course_module import (
CourseDescriptor,
CATALOG_VISIBILITY_CATALOG_AND_ABOUT,
CATALOG_VISIBILITY_ABOUT,
)
from xmodule.error_module import ErrorDescriptor
from xmodule.x_module import XModule, DEPRECATION_VSCOMPAT_EVENT
from xmodule.split_test_module import get_split_user_partitions
from xmodule.partitions.partitions import NoSuchUserPartitionError, NoSuchUserPartitionGroupError
from external_auth.models import ExternalAuthMap
from courseware.masquerade import get_masquerade_role, is_masquerading_as_student
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from student import auth
from student.models import CourseEnrollmentAllowed
from student.roles import (
CourseBetaTesterRole,
CourseInstructorRole,
CourseStaffRole,
GlobalStaff,
SupportStaffRole,
OrgInstructorRole,
OrgStaffRole,
)
from util.milestones_helpers import (
get_pre_requisite_courses_not_completed,
any_unfulfilled_milestones,
is_prerequisite_courses_enabled,
)
from ccx_keys.locator import CCXLocator
import dogstats_wrapper as dog_stats_api
from courseware.access_response import (
MilestoneError,
MobileAvailabilityError,
VisibilityError,
)
from courseware.access_utils import adjust_start_date, check_start_date, debug, ACCESS_GRANTED, ACCESS_DENIED
log = logging.getLogger(__name__)
def has_access(user, action, obj, course_key=None):
    """
    Check whether a user has the access to do action on obj.  Handles any magic
    switching based on various settings.

    Things this module understands:
    - start dates for modules
    - visible_to_staff_only for modules
    - DISABLE_START_DATES
    - different access for instructor, staff, course staff, and students.
    - mobile_available flag for course modules

    user: a Django user object. May be anonymous. If none is passed,
                    anonymous is assumed

    obj: The object to check access for.  A module, descriptor, location, or
                    certain special strings (e.g. 'global')

    action: A string specifying the action that the client is trying to perform.

    actions depend on the obj type, but include e.g. 'enroll' for courses.  See the
    type-specific functions below for the known actions for that type.

    course_key: A course_key specifying which course run this access is for.
        Required when accessing anything other than a CourseDescriptor, 'global',
        or a location with category 'course'

    Returns an AccessResponse object.  It is up to the caller to actually
    deny access in a way that makes sense in context.
    """
    # Just in case user is passed in as None, make them anonymous
    if not user:
        user = AnonymousUser()

    # CCX keys are checked against their parent course run.
    if isinstance(course_key, CCXLocator):
        course_key = course_key.to_course_locator()

    # Delegate the work to type-specific functions.
    # (start with more specific types, then get more general)
    # Consolidated: CourseDescriptor and CourseOverview share the same
    # course-level checker, so test them in a single isinstance call.
    if isinstance(obj, (CourseDescriptor, CourseOverview)):
        return _has_access_course(user, action, obj)

    if isinstance(obj, ErrorDescriptor):
        return _has_access_error_desc(user, action, obj, course_key)

    if isinstance(obj, XModule):
        return _has_access_xmodule(user, action, obj, course_key)

    # NOTE: any descriptor access checkers need to go above this
    if isinstance(obj, XBlock):
        return _has_access_descriptor(user, action, obj, course_key)

    if isinstance(obj, CCXLocator):
        return _has_access_ccx_key(user, action, obj)

    if isinstance(obj, CourseKey):
        return _has_access_course_key(user, action, obj)

    if isinstance(obj, UsageKey):
        return _has_access_location(user, action, obj, course_key)

    if isinstance(obj, basestring):
        return _has_access_string(user, action, obj)

    # Passing an unknown object here is a coding error, so rather than
    # returning a default, complain.
    raise TypeError("Unknown object type in has_access(): '{0}'"
                    .format(type(obj)))
# ================ Implementation helpers ================================
def _can_access_descriptor_with_start_date(user, descriptor, course_key):  # pylint: disable=invalid-name
    """
    Grant or deny access to *descriptor* based solely on its start date.

    Access is granted when the descriptor has no start date, or when the
    (possibly beta-adjusted) start date has passed.  Staff status and
    detached-descriptor checks are assumed to have been done by the caller.

    Arguments:
        user (User): the user whose descriptor access we are checking.
        descriptor: an object with .start and .days_early_for_beta
            attributes (CourseDescriptor, CourseOverview, or similar).

    Returns:
        AccessResponse: ACCESS_GRANTED or a StartDateError.
    """
    beta_days = descriptor.days_early_for_beta
    return check_start_date(user, beta_days, descriptor.start, course_key)
def _can_view_courseware_with_prerequisites(user, course):  # pylint: disable=invalid-name
    """
    Check whether a user may view a course given its prerequisite courses.

    Access is granted outright when the prerequisite feature is disabled,
    when the user has staff access, or when the user is anonymous; otherwise
    it depends on whether the prerequisite courses have been completed.

    Arguments:
        user (User): the user whose course access we are checking.
        course: a course-like object with .location and .id attributes
            (CourseDescriptor, CourseOverview, or similar).
    """
    # Feature switch: disabled means everyone passes this check.
    feature_check = ACCESS_DENIED if is_prerequisite_courses_enabled() else ACCESS_GRANTED
    if feature_check:
        return feature_check

    staff_access = _has_staff_access_to_descriptor(user, course, course.id)
    if staff_access:
        return staff_access

    anonymous = user.is_anonymous()
    if anonymous:
        return anonymous

    return _has_fulfilled_prerequisites(user, [course.id])
def _can_load_course_on_mobile(user, course):
    """
    Grant or deny access to the given course from a mobile device.

    Only mobile-specific restrictions are evaluated here; callers must ALSO
    check generic restrictions such as the start date and the
    .visible_to_staff_only flag in *addition* to this function's result.

    Arguments:
        user (User): the user whose course access we are checking.
        course (CourseDescriptor|CourseOverview): the course in question.

    Returns:
        bool: whether the course can be accessed on mobile.
    """
    mobile_available = is_mobile_available_for_user(user, course)
    if not mobile_available:
        return mobile_available
    staff_access = _has_staff_access_to_descriptor(user, course, course.id)
    if staff_access:
        return staff_access
    return _has_fulfilled_all_milestones(user, course.id)
def _can_enroll_courselike(user, courselike):
    """
    Ascertain if the user can enroll in the given courselike object.

    The checks run in a fixed order: registration-method gate, explicit
    enrollment whitelist / staff override, invitation-only lockout, and
    finally the enrollment time window.

    Arguments:
        user (User): The user attempting to enroll.
        courselike (CourseDescriptor or CourseOverview): The object representing the
            course in which the user is trying to enroll.

    Returns:
        AccessResponse, indicating whether the user can enroll.
    """
    enrollment_domain = courselike.enrollment_domain
    # Courselike objects (e.g., course descriptors and CourseOverviews) have an attribute named `id`
    # which actually points to a CourseKey. Sigh.
    course_key = courselike.id
    # If using a registration method to restrict enrollment (e.g., Shibboleth)
    if settings.FEATURES.get('RESTRICT_ENROLL_BY_REG_METHOD') and enrollment_domain:
        # Only users whose ExternalAuthMap matches the course's enrollment
        # domain clear the registration-method gate.
        if user is not None and user.is_authenticated() and \
                ExternalAuthMap.objects.filter(user=user, external_domain=enrollment_domain):
            debug("Allow: external_auth of " + enrollment_domain)
            reg_method_ok = True
        else:
            reg_method_ok = False
    else:
        # No registration-method restriction is in effect.
        reg_method_ok = True
    # If the user appears in CourseEnrollmentAllowed paired with the given course key,
    # they may enroll, as may course staff. Note that as dictated by the legacy database
    # schema, the filter call includes a `course_id` kwarg which requires a CourseKey.
    if user is not None and user.is_authenticated():
        if CourseEnrollmentAllowed.objects.filter(email=user.email, course_id=course_key):
            return ACCESS_GRANTED
        if _has_staff_access_to_descriptor(user, courselike, course_key):
            return ACCESS_GRANTED
    # Invitation-only courses deny everyone not whitelisted above.
    if courselike.invitation_only:
        debug("Deny: invitation only")
        return ACCESS_DENIED
    # Missing enrollment bounds are treated as an open-ended window.
    now = datetime.now(UTC())
    enrollment_start = courselike.enrollment_start or datetime.min.replace(tzinfo=pytz.UTC)
    enrollment_end = courselike.enrollment_end or datetime.max.replace(tzinfo=pytz.UTC)
    if reg_method_ok and enrollment_start < now < enrollment_end:
        debug("Allow: in enrollment period")
        return ACCESS_GRANTED
    return ACCESS_DENIED
def _has_access_course(user, action, courselike):
    """
    Check if user has access to a course.

    Arguments:
        user (User): the user whose course access we are checking.
        action (string): The action that is being checked.
        courselike (CourseDescriptor or CourseOverview): The object
            representing the course that the user wants to access.

    Valid actions:
    'load' -- load the courseware, see inside the course
    'load_forum' -- can load and contribute to the forums (one access level for now)
    'load_mobile' -- can load from a mobile context
    'enroll' -- enroll. Checks for enrollment window.
    'see_exists' -- can see that the course exists.
    'staff' -- staff access to course.
    'see_in_catalog' -- user is able to see the course listed in the course catalog.
    'see_about_page' -- user is able to see the course about page.
    """
    def can_load():
        """
        Can this user load this course?

        NOTE: this is not checking whether user is actually enrolled in the course.
        """
        # AccessResponse objects are truthy when granted, so the `and` chain
        # propagates the first failing check's specific error response.
        response = (
            _visible_to_nonstaff_users(courselike) and
            _can_access_descriptor_with_start_date(user, courselike, courselike.id)
        )
        # Staff may always load; otherwise surface the (possibly error) response.
        return (
            ACCESS_GRANTED if (response or _has_staff_access_to_descriptor(user, courselike, courselike.id))
            else response
        )
    def can_enroll():
        """
        Returns whether the user can enroll in the course.
        """
        return _can_enroll_courselike(user, courselike)
    def see_exists():
        """
        Can see if can enroll, but also if can load it: if user enrolled in a course and now
        it's past the enrollment period, they should still see it.
        """
        return ACCESS_GRANTED if (can_enroll() or can_load()) else ACCESS_DENIED
    def can_see_in_catalog():
        """
        Implements the "can see course in catalog" logic if a course should be visible in the main course catalog
        In this case we use the catalog_visibility property on the course descriptor
        but also allow course staff to see this.
        """
        return (
            _has_catalog_visibility(courselike, CATALOG_VISIBILITY_CATALOG_AND_ABOUT)
            or _has_staff_access_to_descriptor(user, courselike, courselike.id)
        )
    def can_see_about_page():
        """
        Implements the "can see course about page" logic if a course about page should be visible
        In this case we use the catalog_visibility property on the course descriptor
        but also allow course staff to see this.
        """
        return (
            _has_catalog_visibility(courselike, CATALOG_VISIBILITY_CATALOG_AND_ABOUT)
            or _has_catalog_visibility(courselike, CATALOG_VISIBILITY_ABOUT)
            or _has_staff_access_to_descriptor(user, courselike, courselike.id)
        )
    # Map each supported action onto its checker; _dispatch invokes the one
    # matching `action` (and raises ValueError for unknown actions).
    checkers = {
        'load': can_load,
        'view_courseware_with_prerequisites':
            lambda: _can_view_courseware_with_prerequisites(user, courselike),
        'load_mobile': lambda: can_load() and _can_load_course_on_mobile(user, courselike),
        'enroll': can_enroll,
        'see_exists': see_exists,
        'staff': lambda: _has_staff_access_to_descriptor(user, courselike, courselike.id),
        'instructor': lambda: _has_instructor_access_to_descriptor(user, courselike, courselike.id),
        'see_in_catalog': can_see_in_catalog,
        'see_about_page': can_see_about_page,
    }
    return _dispatch(checkers, action, user, courselike)
def _has_access_error_desc(user, action, descriptor, course_key):
    """
    Restrict error descriptors to staff.

    Valid actions:
    'load' -- load this descriptor, showing it to the user.
    'staff' -- staff access to descriptor.
    'instructor' -- instructor access to descriptor.
    """
    def check_for_staff():
        """Both 'load' and 'staff' reduce to the course-staff check."""
        return _has_staff_access_to_descriptor(user, descriptor, course_key)

    def check_for_instructor():
        """Instructor access to the descriptor's course."""
        return _has_instructor_access_to_descriptor(user, descriptor, course_key)

    return _dispatch(
        {
            'load': check_for_staff,
            'staff': check_for_staff,
            'instructor': check_for_instructor,
        },
        action, user, descriptor,
    )
def _has_group_access(descriptor, user, course_key):
    """
    Return an AccessResponse indicating whether `user` has sufficient group
    memberships (per the block's user partitions) to "load" the `descriptor`.
    """
    if len(descriptor.user_partitions) == len(get_split_user_partitions(descriptor.user_partitions)):
        # Short-circuit the process, since there are no defined user partitions that are not
        # user_partitions used by the split_test module. The split_test module handles its own access
        # via updating the children of the split_test module.
        return ACCESS_GRANTED
    # use merged_group_access which takes group access on the block's
    # parents / ancestors into account
    merged_access = descriptor.merged_group_access
    # check for False in merged_access, which indicates that at least one
    # partition's group list excludes all students.
    if False in merged_access.values():
        log.warning("Group access check excludes all students, access will be denied.", exc_info=True)
        return ACCESS_DENIED
    # resolve the partition IDs in group_access to actual
    # partition objects, skipping those which contain empty group directives.
    # If a referenced partition could not be found, it will be denied
    # If the partition is found but is no longer active (meaning it's been disabled)
    # then skip the access check for that partition.
    partitions = []
    for partition_id, group_ids in merged_access.items():
        try:
            partition = descriptor._get_user_partition(partition_id)  # pylint: disable=protected-access
            if partition.active:
                if group_ids is not None:
                    partitions.append(partition)
            else:
                log.debug(
                    "Skipping partition with ID %s in course %s because it is no longer active",
                    partition.id, course_key
                )
        except NoSuchUserPartitionError:
            log.warning("Error looking up user partition, access will be denied.", exc_info=True)
            return ACCESS_DENIED
    # next resolve the group IDs specified within each partition
    partition_groups = []
    try:
        for partition in partitions:
            groups = [
                partition.get_group(group_id)
                for group_id in merged_access[partition.id]
            ]
            if groups:
                partition_groups.append((partition, groups))
    except NoSuchUserPartitionGroupError:
        log.warning("Error looking up referenced user partition group, access will be denied.", exc_info=True)
        return ACCESS_DENIED
    # look up the user's group for each partition
    user_groups = {}
    for partition, groups in partition_groups:
        user_groups[partition.id] = partition.scheme.get_group_for_user(
            course_key,
            user,
            partition,
        )
    # finally: check that the user has a satisfactory group assignment
    # for each partition.
    if not all(user_groups.get(partition.id) in groups for partition, groups in partition_groups):
        return ACCESS_DENIED
    # all checks passed.
    return ACCESS_GRANTED
def _has_access_descriptor(user, action, descriptor, course_key=None):
    """
    Check if user has access to this descriptor.

    Valid actions:
    'load' -- load this descriptor, showing it to the user.
    'staff' -- staff access to descriptor.
    'instructor' -- instructor access to descriptor.

    NOTE: This is the fallback logic for descriptors that don't have custom policy
    (e.g. courses). If you call this method directly instead of going through
    has_access(), it will not do the right thing.
    """
    def can_load():
        """
        NOTE: This does not check that the student is enrolled in the course
        that contains this module. We may or may not want to allow non-enrolled
        students to see modules. If not, views should check the course, so we
        don't have to hit the enrollments table on every module load.
        """
        # Deny if hidden from students, or group membership is insufficient,
        # or (for non-detached blocks) the start date hasn't passed yet.
        # AccessResponse objects are truthy/falsy, so the first failing
        # check's specific error propagates through the boolean chain.
        response = (
            _visible_to_nonstaff_users(descriptor)
            and _has_group_access(descriptor, user, course_key)
            and
            (
                _has_detached_class_tag(descriptor)
                or _can_access_descriptor_with_start_date(user, descriptor, course_key)
            )
        )
        # Staff override: grant regardless of the checks above.
        return (
            ACCESS_GRANTED if (response or _has_staff_access_to_descriptor(user, descriptor, course_key))
            else response
        )
    checkers = {
        'load': can_load,
        'staff': lambda: _has_staff_access_to_descriptor(user, descriptor, course_key),
        'instructor': lambda: _has_instructor_access_to_descriptor(user, descriptor, course_key)
    }
    return _dispatch(checkers, action, user, descriptor)
def _has_access_xmodule(user, action, xmodule, course_key):
    """
    Check if user has access to this xmodule.

    Valid actions are the same as for the xmodule's descriptor: the check
    simply delegates to it.
    """
    descriptor = xmodule.descriptor
    return has_access(user, action, descriptor, course_key)
def _has_access_location(user, action, location, course_key):
    """
    Check if user has access to this location.

    Valid actions:
    'staff' : True if the user has staff access to this location

    NOTE: if you add other actions, make sure that
     has_access(user, location, action) == has_access(user, get_item(location), action)
    """
    def check_staff():
        """Staff check scoped to the location's course."""
        return _has_staff_access_to_location(user, location, course_key)

    return _dispatch({'staff': check_staff}, action, user, location)
def _has_access_course_key(user, action, course_key):
    """
    Check if user has access to the course with this course_key.

    Valid actions:
    'staff' : True if the user has staff access to this course
    'instructor' : True if the user has instructor access to this course
    """
    def check_staff():
        return _has_staff_access_to_location(user, None, course_key)

    def check_instructor():
        return _has_instructor_access_to_location(user, None, course_key)

    return _dispatch(
        {'staff': check_staff, 'instructor': check_instructor},
        action,
        user,
        course_key,
    )
def _has_access_ccx_key(user, action, ccx_key):
    """Check if user has access to the course for this ccx_key.

    Delegates checking to _has_access_course_key; valid actions are the same.
    """
    return _has_access_course_key(user, action, ccx_key.to_course_locator())
def _has_access_string(user, action, perm):
    """
    Check if user has certain special access, specified as string. Valid strings:

    'global'

    Valid actions:

    'staff' -- global staff access.
    'support' -- access to student support functionality
    'certificates' --- access to view and regenerate certificates for other users.
    """
    def check_staff():
        """
        Checks for global staff access; any permission string other than
        'global' is rejected outright.
        """
        if perm != 'global':
            debug("Deny: invalid permission '%s'", perm)
            return ACCESS_DENIED
        is_global_staff = GlobalStaff().has_user(user)
        return ACCESS_GRANTED if is_global_staff else ACCESS_DENIED

    def check_support():
        """Check that the user has access to the support UI. """
        if perm != 'global':
            return ACCESS_DENIED
        if GlobalStaff().has_user(user) or SupportStaffRole().has_user(user):
            return ACCESS_GRANTED
        return ACCESS_DENIED

    return _dispatch(
        {
            'staff': check_staff,
            'support': check_support,
            'certificates': check_support,
        },
        action, user, perm,
    )
##### Internal helper methods below
def _dispatch(table, action, user, obj):
    """
    Helper: call table[action], raising a nice pretty error if there is no such key.

    user and object passed in only for error messages and debugging
    """
    if action in table:
        result = table[action]()
        # Log the outcome; AccessResponse objects are truthy when granted.
        debug("%s user %s, object %s, action %s",
              'ALLOWED' if result else 'DENIED',
              user,
              obj.location.to_deprecated_string() if isinstance(obj, XBlock) else str(obj),
              action)
        return result
    raise ValueError(u"Unknown action for object type '{0}': '{1}'".format(
        type(obj), action))
def _adjust_start_date_for_beta_testers(user, descriptor, course_key):  # pylint: disable=invalid-name
    """
    Return the effective start datetime for `user`: the descriptor's start,
    shifted earlier by the appropriate number of days when the user belongs
    to a beta-test group.

    Arguments:
        user: A django user. May be anonymous.
        descriptor: the XModuleDescriptor the user is trying to get access to, with a
            non-None start date.

    Returns:
        A datetime. Either the same as start, or earlier for beta testers.

    NOTE: number of days to adjust should be cached to avoid looking it up thousands of
    times per query.

    NOTE: For now, this function assumes that the descriptor's location is in the course
    the user is looking at. Once we have proper usages and definitions per the XBlock
    design, this should use the course the usage is in.
    """
    days_early = descriptor.days_early_for_beta
    return adjust_start_date(user, days_early, descriptor.start, course_key)
def _has_instructor_access_to_location(user, location, course_key=None):
    """Instructor-level access check for the course containing `location`."""
    key = location.course_key if course_key is None else course_key
    return _has_access_to_course(user, 'instructor', key)
def _has_staff_access_to_location(user, location, course_key=None):
    """Staff-level access check for the course containing `location`."""
    key = location.course_key if course_key is None else course_key
    return _has_access_to_course(user, 'staff', key)
def _has_access_to_course(user, access_level, course_key):
    """
    Returns ACCESS_GRANTED if the given user has access_level (= staff or
    instructor) access to the course with the given course_key.
    This ensures the user is authenticated and checks if global staff or has
    staff / instructor access.

    access_level = string, either "staff" or "instructor"
    """
    if user is None or (not user.is_authenticated()):
        debug("Deny: no user or anon user")
        return ACCESS_DENIED
    # Staff masquerading as a student are treated as students.
    if is_masquerading_as_student(user, course_key):
        return ACCESS_DENIED
    # Global (platform-wide) staff get every access level unconditionally.
    if GlobalStaff().has_user(user):
        debug("Allow: user.is_staff")
        return ACCESS_GRANTED
    if access_level not in ('staff', 'instructor'):
        log.debug("Error in access._has_access_to_course access_level=%s unknown", access_level)
        debug("Deny: unknown access level")
        return ACCESS_DENIED
    # Course- or org-level staff role satisfies the 'staff' level.
    staff_access = (
        CourseStaffRole(course_key).has_user(user) or
        OrgStaffRole(course_key.org).has_user(user)
    )
    if staff_access and access_level == 'staff':
        debug("Allow: user has course staff access")
        return ACCESS_GRANTED
    # Instructor role satisfies both 'staff' and 'instructor' levels.
    instructor_access = (
        CourseInstructorRole(course_key).has_user(user) or
        OrgInstructorRole(course_key.org).has_user(user)
    )
    if instructor_access and access_level in ('staff', 'instructor'):
        debug("Allow: user has course instructor access")
        return ACCESS_GRANTED
    debug("Deny: user did not have correct access")
    return ACCESS_DENIED
def _has_instructor_access_to_descriptor(user, descriptor, course_key):  # pylint: disable=invalid-name
    """
    Check whether the user has instructor access to the course containing
    the descriptor's location.

    descriptor: something that has a location attribute
    """
    location = descriptor.location
    return _has_instructor_access_to_location(user, location, course_key)
def _has_staff_access_to_descriptor(user, descriptor, course_key):
    """
    Check whether the user has staff access to the course containing the
    descriptor's location.

    descriptor: something that has a location attribute
    """
    location = descriptor.location
    return _has_staff_access_to_location(user, location, course_key)
def _visible_to_nonstaff_users(descriptor):
    """
    Grant access unless the descriptor is flagged as staff-only.

    Arguments:
        descriptor: object to check

    Returns ACCESS_GRANTED, or a VisibilityError when hidden from students.
    """
    if descriptor.visible_to_staff_only:
        return VisibilityError()
    return ACCESS_GRANTED
def _has_detached_class_tag(descriptor):
    """
    Grant access when the given descriptor's type is marked as detached.

    Arguments:
        descriptor: object to check
    """
    class_tags = descriptor._class_tags  # pylint: disable=protected-access
    return ACCESS_GRANTED if 'detached' in class_tags else ACCESS_DENIED
def _has_fulfilled_all_milestones(user, course_id):
    """
    Grant access iff the given user has fulfilled all milestones for the
    given course.

    Arguments:
        user: the user to check
        course_id: ID of the course to check
    """
    if any_unfulfilled_milestones(course_id, user.id):
        return MilestoneError()
    return ACCESS_GRANTED
def _has_fulfilled_prerequisites(user, course_id):
    """
    Grant access iff the given user has fulfilled all prerequisites for the
    given course.

    Arguments:
        user: user to check
        course_id: ID of the course to check
    """
    pending = get_pre_requisite_courses_not_completed(user, course_id)
    if pending:
        return MilestoneError()
    return ACCESS_GRANTED
def _has_catalog_visibility(course, visibility_type):
    """Grant access when the course's catalog_visibility equals `visibility_type`."""
    if course.catalog_visibility == visibility_type:
        return ACCESS_GRANTED
    return ACCESS_DENIED
def _is_descriptor_mobile_available(descriptor):
    """Grant access when the descriptor is flagged as available on mobile."""
    if descriptor.mobile_available:
        return ACCESS_GRANTED
    return MobileAvailabilityError()
def is_mobile_available_for_user(user, descriptor):
    """
    Returns whether the given course is mobile_available for the given user.

    Checks:
        mobile_available flag on the course
        Beta User and staff access overrides the mobile_available flag

    Arguments:
        descriptor (CourseDescriptor|CourseOverview): course or overview of course in question
    """
    beta_access = auth.user_has_role(user, CourseBetaTesterRole(descriptor.id))
    if beta_access:
        return beta_access
    staff_access = _has_staff_access_to_descriptor(user, descriptor, descriptor.id)
    if staff_access:
        return staff_access
    return _is_descriptor_mobile_available(descriptor)
def get_user_role(user, course_key):
    """
    Return the user's LMS course role as a string: 'instructor', 'staff'
    or 'student'.  An active masquerade role takes precedence.
    """
    masquerade_role = get_masquerade_role(user, course_key)
    if masquerade_role:
        return masquerade_role
    for level in ('instructor', 'staff'):
        if has_access(user, level, course_key):
            return level
    return 'student'
| agpl-3.0 |
sealhuang/brainDecodingToolbox | braincode/vim2/util.py | 3 | 14748 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
import glob
import numpy as np
import nibabel as nib
import matplotlib.pylab as plt
import matplotlib.image as mpimg
from braincode.math import corr2_coef, make_2d_gaussian, make_2d_dog, make_2d_log
def idx2coord(vec_idx):
    """Convert a row index in the response data matrix into a 3D coordinate
    in the (original) ROI volume of shape (18, 64, 64).

    Arguments:
        vec_idx: flat (row) index into the response matrix.

    Returns:
        (coord_x, coord_y, coord_z) tuple of integer coordinates.
    """
    data_size = (18, 64, 64)
    # Floor division (`//`) keeps the coordinates integral under Python 3 as
    # well; it is identical to `/` for non-negative ints under Python 2.
    coord_z = vec_idx % data_size[2]
    coord_x = vec_idx // (data_size[1]*data_size[2])
    coord_y = (vec_idx % (data_size[1]*data_size[2])) // data_size[2]
    return (coord_x, coord_y, coord_z)
def coord2idx(coord):
    """Convert a 3D coordinate from a nifti file into a row index in the
    response data matrix.

    `coord` must be a tuple of (x, y, z).
    """
    x, y, z = coord
    # Reorder axes and flip y to match the (18, 64, 64) response volume.
    slice_idx, row_idx, col_idx = z, x, 63 - y
    return col_idx + slice_idx * 64 * 64 + row_idx * 64
def node2feature(node_idx, data_shape):
    """Convert a flat node index from a CNN activation vector into its
    (channel, row, column) position.

    Arguments:
        node_idx: flat index into the activation vector.
        data_shape: (channel#, row#, column#) shape of the activation maps,
            e.g. conv1: (96, 55, 55), conv2: (256, 27, 27),
            conv3/conv4: (384, 13, 13), conv5: (256, 13, 13),
            pool5: (256, 6, 6).

    Return a tuple of (channel index, row index, column index).
    """
    s = data_shape
    # Floor division (`//`) keeps indices integral under Python 3 too;
    # identical to `/` for non-negative ints under Python 2.
    col_idx = node_idx % s[2]
    channel_idx = node_idx // (s[1]*s[2])
    row_idx = (node_idx % (s[1]*s[2])) // s[2]
    return (channel_idx, row_idx, col_idx)
def vxl_data2nifti(data, vxl_idx, out_file):
    """Scatter voxel-indexed data into an (18, 64, 64) volume (NaN elsewhere)
    and save it as a nifti file."""
    vol = np.full(18 * 64 * 64, np.nan)
    vol[vxl_idx] = data
    save2nifti(vol.reshape(18, 64, 64), out_file)
def save2nifti(data, filename):
    """Save 3D data as a nifti file.

    Original data shape is (18, 64, 64), and the resulting data shape is
    (64, 64, 18) which orientation is SRP."""
    # reorient: move the slice axis last, then flip the second axis
    vol = np.rollaxis(data, 0, 3)[:, ::-1, :]
    # hard-coded affine matching the 2 x 2.5 x 2 mm acquisition grid
    aff = np.array([[0., 2., 0., 0.],
                    [0., 0., -2.5, 0.],
                    [2., 0., 0., 0.],
                    [0., 0., 0., 1.]])
    nib.save(nib.Nifti1Image(vol, aff), filename)
def mask2nifti(data, filename):
    """Save a 3D mask derived from pycortex as a nifti file.

    Original data shape is (18, 64, 64), and the resulting data shape is
    (64, 64, 18) which orientation is SRP."""
    # cast to little-endian float64 before reorienting
    vol = data.astype('<f8')
    vol = np.rollaxis(vol, 0, 3)
    vol = np.rollaxis(vol, 0, 2)
    vol = vol[:, ::-1, :]
    # hard-coded affine matching the 2 x 2.5 x 2 mm acquisition grid
    aff = np.array([[0., 2., 0., 0.],
                    [0., 0., -2.5, 0.],
                    [2., 0., 0., 0.],
                    [0., 0., 0., 1.]])
    nib.save(nib.Nifti1Image(vol, aff), filename)
def plot_prf(prf_file):
    """Plot pRF weights: one figure per channel (96 figures), each a 5x8
    grid of 55x55 weight maps, saved as <channel>.png."""
    prf_data = np.load(prf_file)
    vxl = prf_data[..., 0]
    # figure config
    for f in range(96):
        fig, axs = plt.subplots(5, 8)
        for t in range(40):
            tmp = vxl[:, t].reshape(96, 55, 55)
            tmp = tmp[f, :]
            # floor division (`//`) so subplot indexing also works on Python 3
            im = axs[t//8][t%8].imshow(tmp, interpolation='nearest',
                                       cmap=plt.cm.ocean,
                                       vmin=-0.2, vmax=0.3)
        fig.colorbar(im)
        #plt.show()
        fig.savefig('%s.png'%(f))
def channel_sim(feat_file):
    """Compute similarity between each pair of channels, save the
    upper-triangular similarity matrix to `sim_mtx.npy`, and display it."""
    feat = np.load(feat_file)
    # print() with a single argument behaves identically on Python 2 and 3
    print(feat.shape)
    feat = feat.reshape(96, 55, 55, 540)
    simmtx = np.zeros((feat.shape[0], feat.shape[0]))
    for i in range(feat.shape[0]):
        for j in range(i+1, feat.shape[0]):
            print('%s - %s' % (i, j))
            x = feat[i, :].reshape(-1, feat.shape[3])
            y = feat[j, :].reshape(-1, feat.shape[3])
            # mean of the per-position correlations between the two channels
            tmp = corr2_coef(x, y)
            tmp = tmp.diagonal()
            simmtx[i, j] = tmp.mean()
    np.save('sim_mtx.npy', simmtx)
    im = plt.imshow(simmtx, interpolation='nearest', cmap=plt.cm.ocean)
    plt.colorbar(im)
    plt.show()
def data_swap(nifti_file):
    """Convert nifti data back into the original (18, 64, 64) data shape."""
    vol = nib.load(nifti_file).get_data()
    # flip the second axis, then permute the axes; transpose((2, 0, 1)) is
    # equivalent to applying rollaxis(..., 0, 3) twice
    vol = vol[:, ::-1, :]
    return np.transpose(vol, (2, 0, 1))
def nifti4pycortex(nifti_file):
    """Load a nifti file in the axis order expected by pycortex."""
    vol = nib.load(nifti_file).get_data()
    # rollaxis(..., 0, 3) followed by rollaxis(..., 0, 2) reverses the axes
    return np.transpose(vol, (2, 1, 0))
def plot_cca_fweights(data, out_dir, prefix_name, two_side=False):
    """Plot feature weights derived from CCA: one 8x12 grid of channel maps
    per CCA component, saved as <prefix_name>_<component>.png in `out_dir`.

    `two_side`: use a symmetric color range centered on zero.
    """
    if len(data.shape)==3:
        data = np.expand_dims(data, axis=3)
    n_components = data.shape[3]
    n_channels = data.shape[0]
    for f in range(n_components):
        fig, axs = plt.subplots(8, 12)
        cdata = data[..., f]
        if two_side:
            maxv = max(cdata.max(), -1*cdata.min())
            minv = -1 * maxv
        else:
            maxv = cdata.max()
            minv = cdata.min()
        for c in range(n_channels):
            tmp = cdata[c, ...]
            # floor division (`//`) so subplot indexing also works on Python 3
            im = axs[c//12][c%12].imshow(tmp, interpolation='nearest',
                                         vmin=minv, vmax=maxv)
            axs[c//12][c%12].get_xaxis().set_visible(False)
            axs[c//12][c%12].get_yaxis().set_visible(False)
        fig.subplots_adjust(right=0.85)
        cbar_ax = fig.add_axes([0.88, 0.2, 0.03, 0.6])
        fig.colorbar(im, cax=cbar_ax)
        fig.savefig(os.path.join(out_dir, prefix_name+'_%s.png'%(f+1)))
def plot_avg_weights_pattern(feat_weights, top_channels_num=None):
    """Plot channel-averaged feature weights derived from CCA in a 2x5 grid
    (one tile per component), optionally averaging only the first
    `top_channels_num` channels."""
    if len(feat_weights.shape)==3:
        feat_weights = np.expand_dims(feat_weights, axis=3)
    n_components = feat_weights.shape[3]
    n_channels = feat_weights.shape[0]
    if top_channels_num and top_channels_num <= n_channels:
        avg_weights = feat_weights[:top_channels_num, ...].mean(axis=0)
    else:
        avg_weights = feat_weights.mean(axis=0)
    maxv = avg_weights.max()
    minv = avg_weights.min()
    fig, axs = plt.subplots(2, 5)
    for f in range(n_components):
        cdata = avg_weights[..., f]
        # floor division (`//`) so subplot indexing also works on Python 3
        im = axs[f//5][f%5].imshow(cdata, interpolation='nearest',
                                   vmin=minv, vmax=maxv)
        axs[f//5][f%5].get_xaxis().set_visible(False)
        axs[f//5][f%5].get_yaxis().set_visible(False)
    fig.subplots_adjust(right=0.85)
    cbar_ax = fig.add_axes([0.88, 0.2, 0.03, 0.6])
    fig.colorbar(im, cax=cbar_ax)
    fig.show()
def save_cca_volweights(fmri_weights, mask_file, out_dir, prefix_name,
                        out_png=True, two_side=False):
    """Save fmri weights derived from CCA as nifti files.

    Arguments:
        fmri_weights: 2D array of shape (voxel#, component#).
        mask_file: nifti mask naming the voxels that fmri_weights covers.
        out_dir: output directory; one nifti (and optional png) per component.
        prefix_name: filename prefix; the 1-based component index is appended.
        out_png: also render a pycortex flatmap png for each component.
        two_side: use a diverging -1..1 colormap instead of a 0..1 hot map.
    """
    n_components = fmri_weights.shape[1]
    mask = data_swap(mask_file)
    vxl_idx = np.nonzero(mask.flatten()==1)[0]
    for i in range(n_components):
        # Scatter this component's weights back into the full volume.
        tmp = np.zeros_like(mask.flatten(), dtype=np.float64)
        tmp[vxl_idx] = fmri_weights[:, i]
        tmp = tmp.reshape(mask.shape)
        nii_file = os.path.join(out_dir, prefix_name+'%s.nii.gz'%(i+1))
        save2nifti(tmp, nii_file)
        if out_png:
            # Imported lazily: pycortex is only needed when rendering pngs.
            import cortex
            from matplotlib import cm
            # NOTE(review): assumes out_dir looks like .../<subj_id>/<x>/<y>;
            # verify against callers.
            subj_id = out_dir.split('/')[-3]
            if two_side:
                img = cortex.quickflat.make_figure(cortex.Volume(nii_file,
                                                   subj_id, 'func2anat', cmap=cm.bwr,
                                                   vmin=-1., vmax=1.),
                                                   with_curvature=True)
            else:
                img = cortex.quickflat.make_figure(cortex.Volume(nii_file,
                                                   subj_id, 'func2anat', cmap=cm.hot,
                                                   vmin=0., vmax=1.),
                                                   with_curvature=True)
            png_file = os.path.join(out_dir, prefix_name+'%s.png'%(i+1))
            img.savefig(png_file, dpi=200)
def display_video(dataset):
    """Display a 3D array as video, one frame per step along the last axis."""
    plt.ion()
    for i in range(dataset.shape[2]):
        # fix: frames are counted along axis 2, so index axis 2 as well
        # (the old `dataset[:, i]` sliced axis 1 while looping over axis 2)
        plt.imshow(dataset[:, :, i])
        plt.pause(0.05)
def plot_kernerls(in_dir, basename, filename):
    """Plot several kernel images in one 8x12 figure.

    Reads `<basename><n>.png` files from `in_dir` and writes the combined
    figure to `in_dir/<filename>`.
    """
    file_num = len(glob.glob(os.path.join(in_dir, basename+'*')))
    fig, axs = plt.subplots(8, 12)
    for n in range(file_num):
        f = os.path.join(in_dir, basename+str(n)+'.png')
        img = mpimg.imread(f)
        # normalize image into zero-one range
        nimg = (img - img.min()) / (img.max() - img.min())
        # floor division (`//`) so subplot indexing also works on Python 3
        axs[n//12][n%12].imshow(nimg)
        axs[n//12][n%12].get_xaxis().set_visible(False)
        axs[n//12][n%12].get_yaxis().set_visible(False)
    fig.savefig(os.path.join(in_dir, filename))
def save_imshow(data, filename, val_range=None):
    """Render `data` with imshow (gray colormap plus colorbar) and save the
    figure to `filename`.

    `val_range`: optional (vmin, vmax) pair; defaults to the data range.
    """
    if val_range:
        vmin, vmax = val_range[0], val_range[1]
    else:
        vmin, vmax = data.min(), data.max()
    fig, ax = plt.subplots()
    mappable = ax.imshow(data.astype(np.float64), vmin=vmin, vmax=vmax, cmap='gray')
    fig.colorbar(mappable)
    fig.savefig(filename)
    plt.close()
def save_hue(data, filename):
    """Plot a voxel's hue tuning curve over [0, 2*pi] and save it to file."""
    hues = np.linspace(0, 2*np.pi, 201)
    fig, ax = plt.subplots()
    ax.plot(hues, data)
    fig.savefig(filename)
    plt.close()
def fweights_bar(feat_weights):
    """Bar plots for feature weights derived from CCA.

    For each feature/2D feature map, top 20% `abs` weights are averaged
    for evaluation; one bar subplot per CCA component.
    """
    avg_weights = fweights_top_mean(feat_weights, 0.2)
    cc_num = avg_weights.shape[0]
    # fix: the channel count was previously an undefined name (NameError)
    channel_num = avg_weights.shape[1]
    fig, axs = plt.subplots(cc_num, 1)
    for i in range(cc_num):
        ind = np.arange(channel_num)
        axs[i].bar(ind, avg_weights[i], 0.35)
    plt.show()
def fweights_top_mean(feat_weights, top_ratio):
    """For every (component, channel) pair, average the top `top_ratio`
    fraction of absolute weights in that channel's 2D map.

    Arguments:
        feat_weights: array of shape (channel#, ..., component#).
        top_ratio: fraction (0-1) of the largest |weights| to average.

    Returns an array of shape (component#, channel#).
    """
    n_components = feat_weights.shape[3]
    n_channels = feat_weights.shape[0]
    avg_weights = np.zeros((n_components, n_channels))
    for comp in range(n_components):
        comp_map = feat_weights[..., comp]
        for chan in range(n_channels):
            magnitudes = np.sort(np.abs(comp_map[chan, ...]).flatten())
            top_num = int(magnitudes.shape[0] * top_ratio)
            avg_weights[comp, chan] = magnitudes[-top_num:].mean()
    return avg_weights
def roi2nifti(fmri_table, filename, mode='full'):
    """Save ROI as a nifti file.

    `mode`: 'full' for whole ROIs mask creation.
            'small' for mask creation for alignment.

    Each ROI voxel is written with its integer label from the table below;
    voxels in no ROI stay 0 (overlapping ROIs would sum their labels).
    """
    if mode=='full':
        # label mapping for the complete set of localizer ROIs (lh/rh pairs)
        roi_label = {'v1lh': 1, 'v1rh': 2, 'v2lh': 3, 'v2rh': 4,
                     'v3lh': 5, 'v3rh': 6, 'v3alh': 7, 'v3arh': 8,
                     'v3blh': 9, 'v3brh': 10, 'v4lh': 11, 'v4rh': 12,
                     'latocclh': 13, 'latoccrh': 14, 'VOlh': 15, 'VOrh': 16,
                     'STSlh': 17, 'STSrh': 18, 'RSClh': 19, 'RSCrh': 20,
                     'PPAlh': 21, 'PPArh': 22, 'OBJlh': 23, 'OBJrh': 24,
                     'MTlh': 25, 'MTrh': 26, 'MTplh': 27, 'MTprh': 28,
                     'IPlh': 29, 'IPrh': 30, 'FFAlh': 31, 'FFArh': 32,
                     'EBAlh': 33, 'EBArh': 34, 'OFAlh': 35, 'OFArh': 36,
                     'v7alh': 37, 'v7arh': 38, 'v7blh': 39, 'v7brh': 40,
                     'v7clh': 41, 'v7crh': 42, 'v7lh': 43, 'v7rh': 44,
                     'IPS1lh': 45, 'IPS1rh': 46, 'IPS2lh': 47, 'IPS2rh': 48,
                     'IPS3lh': 49, 'IPS3rh': 50, 'IPS4lh': 51, 'IPS4rh': 52,
                     'MSTlh': 53, 'MSTrh': 54, 'TOSlh': 55, 'TOSrh': 56}
    else:
        # reduced set of early visual / motion ROIs used for alignment
        roi_label = {'v1lh': 1, 'v1rh': 2, 'v2lh': 3, 'v2rh': 4,
                     'v3lh': 5, 'v3rh': 6, 'v3alh': 7, 'v3arh': 8,
                     'v3blh': 9, 'v3brh': 10, 'v4lh': 11, 'v4rh': 12,
                     'MTlh': 13, 'MTrh': 14, 'MTplh': 15, 'MTprh': 16}
    roi_list = fmri_table.list_nodes('/roi')
    roi_shape = roi_list[0].shape
    roi_mask = np.zeros(roi_shape)
    # keep only the ROIs selected by `mode`
    roi_list = [r.name for r in roi_list if r.name in roi_label]
    for r in roi_list:
        roi_mask += fmri_table.get_node('/roi/%s'%(r))[:] * roi_label[r]
    save2nifti(roi_mask, filename)
def get_roi_mask(fmri_table, nifti=False):
    """Combine all ROIs into a single mask.

    Returns the flattened mask, or writes `all_roi_mask.nii.gz` instead
    (returning None) when `nifti` is True.
    """
    roi_nodes = fmri_table.list_nodes('/roi')
    mask = np.zeros(roi_nodes[0].shape)
    for node in roi_nodes:
        mask += fmri_table.get_node('/roi/%s'%(node.name))[:]
    if nifti:
        save2nifti(mask, 'all_roi_mask.nii.gz')
    else:
        return mask.flatten()
def gen_mean_vol(fmri_table, dataset, filename):
    """Make a mean response map as a reference volume and save it as nifti.

    Arguments:
        fmri_table: open pytables handle containing the response dataset.
        dataset: node name of the response dataset (voxel# x time#).
        filename: output nifti path.
    """
    data = fmri_table.get_node('/'+dataset)[:]
    # replace nan to zero
    data = np.nan_to_num(data)
    mean_data = np.mean(data, axis=1)
    vol = np.zeros((18, 64, 64))
    for i in range(data.shape[0]):
        # fix: idx2coord lives in this module (`vutil` was an undefined name)
        c = idx2coord(i)
        vol[c[0], c[1], c[2]] = mean_data[i]
    save2nifti(vol, filename)
def spatial_sim_seq(fmri_data):
    """Calculate spatial similarity between adjacent time points.

    fmri_data : A 2D array, each row represents a voxel's time course.

    Returns a 1D array whose t-th entry is the Pearson correlation between
    the spatial patterns at time t-1 and t (entry 0 stays 0).
    """
    n_points = fmri_data.shape[1]
    sims = np.zeros((n_points, ))
    for t in range(1, n_points):
        sims[t] = np.corrcoef(fmri_data[:, t - 1], fmri_data[:, t])[0, 1]
    return sims
def make_gaussian_prf(size):
    """Generate various pRFs based on a 2D Gaussian kernel with different
    parameters.

    For each of 10 FWHM values (1..10) a kernel is produced at every pixel
    center, so the result has shape (size, size, size*size*10).
    """
    fwhm_values = np.arange(1, 11)
    prfs = np.zeros((size, size, size * size * len(fwhm_values)))
    for k, fwhm in enumerate(fwhm_values):
        base = k * size * size
        for row in range(size):
            for col in range(size):
                prfs[:, :, base + row * size + col] = make_2d_gaussian(
                    size, fwhm=fwhm, center=(col, row))
    return prfs
def sugar_gaussian_f(size, x0, y0, sigma, offset, beta):
    """Model-fitting wrapper: scaled + offset 2D Gaussian kernel, flattened."""
    kernel = make_2d_gaussian(size, sigma, center=(y0, x0))
    return (offset + beta * kernel).ravel()
def sugar_dog_f(size, x0, y0, c_sigma, s_sigma, c_beta, s_beta):
    """Model-fitting wrapper: 2D difference-of-Gaussians kernel, flattened."""
    kernel = make_2d_dog(size, c_sigma, s_sigma, c_beta, s_beta, center=(y0, x0))
    return kernel.ravel()
def sugar_log_f(size, x0, y0, sigma, offset, beta):
    """Model-fitting wrapper: scaled + offset 2D Laplacian-of-Gaussian kernel,
    flattened."""
    kernel = make_2d_log(size, sigma, center=(y0, x0))
    return (offset + beta * kernel).ravel()
| bsd-3-clause |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/application_gateway_firewall_rule.py | 7 | 1326 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ApplicationGatewayFirewallRule(Model):
    """A web application firewall rule.

    All required parameters must be populated in order to send to Azure.

    :param rule_id: Required. The identifier of the web application firewall
     rule.
    :type rule_id: int
    :param description: The description of the web application firewall rule.
    :type description: str
    """

    # msrest validation rules: rule_id must always be provided.
    _validation = {
        'rule_id': {'required': True},
    }

    # Maps Python attribute names onto wire-format keys and types for
    # msrest (de)serialization.
    _attribute_map = {
        'rule_id': {'key': 'ruleId', 'type': 'int'},
        'description': {'key': 'description', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # Attributes arrive as keyword arguments and default to None when absent.
        super(ApplicationGatewayFirewallRule, self).__init__(**kwargs)
        self.rule_id = kwargs.get('rule_id', None)
        self.description = kwargs.get('description', None)
| mit |
agry/NGECore2 | scripts/mobiles/naboo/giant_horned_krevol.py | 4 | 1537 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
	# Registers the "giant horned krevol" creature template with the core
	# spawn service (Jython script run by NGECore at startup).
	mobileTemplate = MobileTemplate()
	mobileTemplate.setCreatureName('giant_horned_krevol')
	mobileTemplate.setLevel(6)
	mobileTemplate.setDifficulty(Difficulty.NORMAL)
	# Spawns 4-8 m away from the configured spawn point.
	mobileTemplate.setMinSpawnDistance(4)
	mobileTemplate.setMaxSpawnDistance(8)
	mobileTemplate.setDeathblow(False)
	mobileTemplate.setScale(1)
	# Harvestable resources.
	mobileTemplate.setMeatType("Reptilian Meat")
	mobileTemplate.setMeatAmount(10)
	# Social group "self": only assists its own kind, within 2 m.
	mobileTemplate.setSocialGroup("self")
	mobileTemplate.setAssistRange(2)
	mobileTemplate.setStalker(True)
	mobileTemplate.setOptionsBitmask(Options.ATTACKABLE)
	templates = Vector()
	templates.add('object/mobile/shared_giant_horned_krevol.iff')
	mobileTemplate.setTemplates(templates)
	weaponTemplates = Vector()
	weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
	weaponTemplates.add(weapontemplate)
	mobileTemplate.setWeaponTemplateVector(weaponTemplates)
	attacks = Vector()
	attacks.add('bm_bite_2')
	attacks.add('bm_bolster_armor_2')
	attacks.add('bm_damage_poison_2')
	attacks.add('bm_enfeeble_2')
	# NOTE(review): default attack is 'creatureRangedAttack' while the only
	# equipped weapon is melee/unarmed -- confirm this is intended.
	mobileTemplate.setDefaultAttack('creatureRangedAttack')
	mobileTemplate.setAttacks(attacks)
	core.spawnService.addMobileTemplate('giant_horned_krevol', mobileTemplate)
return | lgpl-3.0 |
yaroslavprogrammer/django | tests/test_utils/tests.py | 50 | 24252 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import warnings
from django.db import connection
from django.forms import EmailField, IntegerField
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.test.html import HTMLParseError, parse_html
from django.test.utils import CaptureQueriesContext, IgnorePendingDeprecationWarningsMixin
from django.utils import six
from django.utils import unittest
from django.utils.unittest import skip
from .models import Person
class SkippingTestCase(TestCase):
    def test_skip_unless_db_feature(self):
        """A test guarded by skipUnlessDBFeature still runs when the
        feature is present (faked here with an always-true attribute)."""
        # Total hack, but it works: every connection object has a
        # "__class__" attribute, so the skip guard never fires.
        @skipUnlessDBFeature("__class__")
        def test_func():
            raise ValueError
        with self.assertRaises(ValueError):
            test_func()
class AssertNumQueriesTests(TestCase):
    # URLconf used by the self.client requests below.
    urls = 'test_utils.urls'
    def test_assert_num_queries(self):
        # assertNumQueries must propagate exceptions raised by the
        # callable it wraps rather than swallowing them.
        def test_func():
            raise ValueError
        self.assertRaises(ValueError,
            self.assertNumQueries, 2, test_func
        )
    def test_assert_num_queries_with_client(self):
        # Each GET of the detail view costs exactly one query; two GETs
        # inside one callable therefore cost two.
        person = Person.objects.create(name='test')
        self.assertNumQueries(
            1,
            self.client.get,
            "/test_utils/get_person/%s/" % person.pk
        )
        self.assertNumQueries(
            1,
            self.client.get,
            "/test_utils/get_person/%s/" % person.pk
        )
        def test_func():
            self.client.get("/test_utils/get_person/%s/" % person.pk)
            self.client.get("/test_utils/get_person/%s/" % person.pk)
        self.assertNumQueries(2, test_func)
class AssertQuerysetEqualTests(TestCase):
    # Exercises assertQuerysetEqual and its ordered/transform options
    # against two fixture Person rows.
    def setUp(self):
        self.p1 = Person.objects.create(name='p1')
        self.p2 = Person.objects.create(name='p2')
    def test_ordered(self):
        # Default comparison is order-sensitive and maps rows with repr().
        self.assertQuerysetEqual(
            Person.objects.all().order_by('name'),
            [repr(self.p1), repr(self.p2)]
        )
    def test_unordered(self):
        # ordered=False compares as multisets, so reversed values match.
        self.assertQuerysetEqual(
            Person.objects.all().order_by('name'),
            [repr(self.p2), repr(self.p1)],
            ordered=False
        )
    def test_transform(self):
        # A custom transform replaces repr() when mapping queryset rows.
        self.assertQuerysetEqual(
            Person.objects.all().order_by('name'),
            [self.p1.pk, self.p2.pk],
            transform=lambda x: x.pk
        )
    def test_undefined_order(self):
        # Using an unordered queryset with more than one ordered value
        # is an error.
        with self.assertRaises(ValueError):
            self.assertQuerysetEqual(
                Person.objects.all(),
                [repr(self.p1), repr(self.p2)]
            )
        # No error for one value.
        self.assertQuerysetEqual(
            Person.objects.filter(name='p1'),
            [repr(self.p1)]
        )
class CaptureQueriesContextManagerTests(TestCase):
    # CaptureQueriesContext records the SQL executed on a connection
    # while its "with" block is active.
    urls = 'test_utils.urls'
    def setUp(self):
        self.person_pk = six.text_type(Person.objects.create(name='test').pk)
    def test_simple(self):
        with CaptureQueriesContext(connection) as captured_queries:
            Person.objects.get(pk=self.person_pk)
        self.assertEqual(len(captured_queries), 1)
        self.assertIn(self.person_pk, captured_queries[0]['sql'])
        # An empty block captures nothing.
        with CaptureQueriesContext(connection) as captured_queries:
            pass
        self.assertEqual(0, len(captured_queries))
    def test_within(self):
        # The captured list is already usable while the context is open.
        with CaptureQueriesContext(connection) as captured_queries:
            Person.objects.get(pk=self.person_pk)
            self.assertEqual(len(captured_queries), 1)
            self.assertIn(self.person_pk, captured_queries[0]['sql'])
    def test_nested(self):
        # Nested contexts capture independently; the outer context also
        # sees the queries run inside the inner one.
        with CaptureQueriesContext(connection) as captured_queries:
            Person.objects.count()
            with CaptureQueriesContext(connection) as nested_captured_queries:
                Person.objects.count()
        self.assertEqual(1, len(nested_captured_queries))
        self.assertEqual(2, len(captured_queries))
    def test_failure(self):
        # Exceptions raised inside the block must propagate.
        with self.assertRaises(TypeError):
            with CaptureQueriesContext(connection):
                raise TypeError
    def test_with_client(self):
        # Queries issued through the test client are captured as well.
        with CaptureQueriesContext(connection) as captured_queries:
            self.client.get("/test_utils/get_person/%s/" % self.person_pk)
        self.assertEqual(len(captured_queries), 1)
        self.assertIn(self.person_pk, captured_queries[0]['sql'])
        with CaptureQueriesContext(connection) as captured_queries:
            self.client.get("/test_utils/get_person/%s/" % self.person_pk)
        self.assertEqual(len(captured_queries), 1)
        self.assertIn(self.person_pk, captured_queries[0]['sql'])
        with CaptureQueriesContext(connection) as captured_queries:
            self.client.get("/test_utils/get_person/%s/" % self.person_pk)
            self.client.get("/test_utils/get_person/%s/" % self.person_pk)
        self.assertEqual(len(captured_queries), 2)
        self.assertIn(self.person_pk, captured_queries[0]['sql'])
        self.assertIn(self.person_pk, captured_queries[1]['sql'])
class AssertNumQueriesContextManagerTests(TestCase):
    # assertNumQueries used as a context manager instead of a callable.
    urls = 'test_utils.urls'
    def test_simple(self):
        with self.assertNumQueries(0):
            pass
        with self.assertNumQueries(1):
            Person.objects.count()
        with self.assertNumQueries(2):
            Person.objects.count()
            Person.objects.count()
    def test_failure(self):
        # A wrong count fails with a message reporting both numbers.
        with self.assertRaises(AssertionError) as exc_info:
            with self.assertNumQueries(2):
                Person.objects.count()
        self.assertIn("1 queries executed, 2 expected", str(exc_info.exception))
        # Exceptions raised inside the block propagate unchanged.
        with self.assertRaises(TypeError):
            with self.assertNumQueries(4000):
                raise TypeError
    def test_with_client(self):
        person = Person.objects.create(name="test")
        with self.assertNumQueries(1):
            self.client.get("/test_utils/get_person/%s/" % person.pk)
        with self.assertNumQueries(1):
            self.client.get("/test_utils/get_person/%s/" % person.pk)
        with self.assertNumQueries(2):
            self.client.get("/test_utils/get_person/%s/" % person.pk)
            self.client.get("/test_utils/get_person/%s/" % person.pk)
class AssertTemplateUsedContextManagerTests(TestCase):
    # assertTemplateUsed / assertTemplateNotUsed as context managers.
    def test_usage(self):
        with self.assertTemplateUsed('template_used/base.html'):
            render_to_string('template_used/base.html')
        with self.assertTemplateUsed(template_name='template_used/base.html'):
            render_to_string('template_used/base.html')
        # Templates pulled in via {% include %} or {% extends %} count too.
        with self.assertTemplateUsed('template_used/base.html'):
            render_to_string('template_used/include.html')
        with self.assertTemplateUsed('template_used/base.html'):
            render_to_string('template_used/extends.html')
        # Multiple renders inside one context are fine.
        with self.assertTemplateUsed('template_used/base.html'):
            render_to_string('template_used/base.html')
            render_to_string('template_used/base.html')
    def test_nested_usage(self):
        with self.assertTemplateUsed('template_used/base.html'):
            with self.assertTemplateUsed('template_used/include.html'):
                render_to_string('template_used/include.html')
        with self.assertTemplateUsed('template_used/extends.html'):
            with self.assertTemplateUsed('template_used/base.html'):
                render_to_string('template_used/extends.html')
        with self.assertTemplateUsed('template_used/base.html'):
            with self.assertTemplateUsed('template_used/alternative.html'):
                render_to_string('template_used/alternative.html')
            render_to_string('template_used/base.html')
        with self.assertTemplateUsed('template_used/base.html'):
            render_to_string('template_used/extends.html')
            # The inner NotUsed context only tracks its own span.
            with self.assertTemplateNotUsed('template_used/base.html'):
                render_to_string('template_used/alternative.html')
            render_to_string('template_used/base.html')
    def test_not_used(self):
        with self.assertTemplateNotUsed('template_used/base.html'):
            pass
        with self.assertTemplateNotUsed('template_used/alternative.html'):
            pass
    def test_error_message(self):
        # The failure message starts with the missing template name and,
        # when something else was rendered, lists those templates too.
        with six.assertRaisesRegex(self, AssertionError, r'^template_used/base\.html'):
            with self.assertTemplateUsed('template_used/base.html'):
                pass
        with six.assertRaisesRegex(self, AssertionError, r'^template_used/base\.html'):
            with self.assertTemplateUsed(template_name='template_used/base.html'):
                pass
        with six.assertRaisesRegex(self, AssertionError, r'^template_used/base\.html.*template_used/alternative\.html$'):
            with self.assertTemplateUsed('template_used/base.html'):
                render_to_string('template_used/alternative.html')
    def test_failure(self):
        # Missing or empty template names are rejected.
        with self.assertRaises(TypeError):
            with self.assertTemplateUsed():
                pass
        with self.assertRaises(AssertionError):
            with self.assertTemplateUsed(''):
                pass
        with self.assertRaises(AssertionError):
            with self.assertTemplateUsed(''):
                render_to_string('template_used/base.html')
        with self.assertRaises(AssertionError):
            with self.assertTemplateUsed(template_name=''):
                pass
        with self.assertRaises(AssertionError):
            with self.assertTemplateUsed('template_used/base.html'):
                render_to_string('template_used/alternative.html')
class SaveRestoreWarningState(TestCase):
    def test_save_restore_warnings_state(self):
        """
        Ensure save_warnings_state/restore_warnings_state work correctly.
        """
        # In reality this test could be satisfied by many broken implementations
        # of save_warnings_state/restore_warnings_state (e.g. just
        # warnings.resetwarnings()) , but it is difficult to test more.
        # catch_warnings() guarantees the global filter list is restored
        # even if an assertion below fails.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            self.save_warnings_state()
            class MyWarning(Warning):
                pass
            # Add a filter that causes an exception to be thrown, so we can catch it
            warnings.simplefilter("error", MyWarning)
            self.assertRaises(Warning, lambda: warnings.warn("warn", MyWarning))
            # Now restore.
            self.restore_warnings_state()
            # After restoring, we shouldn't get an exception. But we don't want a
            # warning printed either, so we have to silence the warning.
            warnings.simplefilter("ignore", MyWarning)
            warnings.warn("warn", MyWarning)
            # Remove the filter we just added.
            self.restore_warnings_state()
class HTMLEqualTests(TestCase):
    """Tests for assertHTMLEqual/assertHTMLNotEqual and the parse_html
    comparison engine behind them."""
    def test_html_parser(self):
        element = parse_html('<div><p>Hello</p></div>')
        self.assertEqual(len(element.children), 1)
        self.assertEqual(element.children[0].name, 'p')
        self.assertEqual(element.children[0].children[0], 'Hello')
        # Unclosed and attribute-only tags still parse without error.
        parse_html('<p>')
        parse_html('<p attr>')
        dom = parse_html('<p>foo')
        self.assertEqual(len(dom.children), 1)
        self.assertEqual(dom.name, 'p')
        self.assertEqual(dom[0], 'foo')
    def test_parse_html_in_script(self):
        # Markup-looking text inside <script> must not confuse the parser.
        parse_html('<script>var a = "<p" + ">";</script>');
        parse_html('''
            <script>
            var js_sha_link='<p>***</p>';
            </script>
        ''')
        # script content will be parsed to text
        dom = parse_html('''
            <script><p>foo</p> '</scr'+'ipt>' <span>bar</span></script>
        ''')
        self.assertEqual(len(dom.children), 1)
        self.assertEqual(dom.children[0], "<p>foo</p> '</scr'+'ipt>' <span>bar</span>")
    def test_self_closing_tags(self):
        # Void elements compare the same with or without the XHTML slash.
        self_closing_tags = ('br' , 'hr', 'input', 'img', 'meta', 'spacer',
            'link', 'frame', 'base', 'col')
        for tag in self_closing_tags:
            dom = parse_html('<p>Hello <%s> world</p>' % tag)
            self.assertEqual(len(dom.children), 3)
            self.assertEqual(dom[0], 'Hello')
            self.assertEqual(dom[1].name, tag)
            self.assertEqual(dom[2], 'world')
            dom = parse_html('<p>Hello <%s /> world</p>' % tag)
            self.assertEqual(len(dom.children), 3)
            self.assertEqual(dom[0], 'Hello')
            self.assertEqual(dom[1].name, tag)
            self.assertEqual(dom[2], 'world')
    def test_simple_equal_html(self):
        # Whitespace between and around tags is not significant.
        self.assertHTMLEqual('', '')
        self.assertHTMLEqual('<p></p>', '<p></p>')
        self.assertHTMLEqual('<p></p>', ' <p> </p> ')
        self.assertHTMLEqual(
            '<div><p>Hello</p></div>',
            '<div><p>Hello</p></div>')
        self.assertHTMLEqual(
            '<div><p>Hello</p></div>',
            '<div> <p>Hello</p> </div>')
        self.assertHTMLEqual(
            '<div>\n<p>Hello</p></div>',
            '<div><p>Hello</p></div>\n')
        self.assertHTMLEqual(
            '<div><p>Hello\nWorld !</p></div>',
            '<div><p>Hello World\n!</p></div>')
        self.assertHTMLEqual(
            '<div><p>Hello\nWorld !</p></div>',
            '<div><p>Hello World\n!</p></div>')
        self.assertHTMLEqual(
            '<p>Hello World !</p>',
            '<p>Hello World\n\n!</p>')
        self.assertHTMLEqual('<p> </p>', '<p></p>')
        self.assertHTMLEqual('<p/>', '<p></p>')
        self.assertHTMLEqual('<p />', '<p></p>')
        self.assertHTMLEqual('<input checked>', '<input checked="checked">')
        self.assertHTMLEqual('<p>Hello', '<p> Hello')
        self.assertHTMLEqual('<p>Hello</p>World', '<p>Hello</p> World')
    def test_ignore_comments(self):
        self.assertHTMLEqual(
            '<div>Hello<!-- this is a comment --> World!</div>',
            '<div>Hello World!</div>')
    def test_unequal_html(self):
        # Word boundaries and sibling structure are significant.
        self.assertHTMLNotEqual('<p>Hello</p>', '<p>Hello!</p>')
        self.assertHTMLNotEqual('<p>foobar</p>', '<p>foo bar</p>')
        self.assertHTMLNotEqual('<p>foo bar</p>', '<p>foo bar</p>')
        self.assertHTMLNotEqual('<p>foo nbsp</p>', '<p>foo </p>')
        self.assertHTMLNotEqual('<p>foo #20</p>', '<p>foo </p>')
        self.assertHTMLNotEqual(
            '<p><span>Hello</span><span>World</span></p>',
            '<p><span>Hello</span>World</p>')
        self.assertHTMLNotEqual(
            '<p><span>Hello</span>World</p>',
            '<p><span>Hello</span><span>World</span></p>')
    def test_attributes(self):
        # Attribute order and quoting style are not significant.
        self.assertHTMLEqual(
            '<input type="text" id="id_name" />',
            '<input id="id_name" type="text" />')
        self.assertHTMLEqual(
            '''<input type='text' id="id_name" />''',
            '<input id="id_name" type="text" />')
        self.assertHTMLNotEqual(
            '<input type="text" id="id_name" />',
            '<input type="password" id="id_name" />')
    def test_complex_examples(self):
        self.assertHTMLEqual(
            """<tr><th><label for="id_first_name">First name:</label></th>
<td><input type="text" name="first_name" value="John" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th>
<td><input type="text" id="id_last_name" name="last_name" value="Lennon" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th>
<td><input type="text" value="1940-10-9" name="birthday" id="id_birthday" /></td></tr>""",
            """
        <tr><th>
            <label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" value="John" id="id_first_name" />
        </td></tr>
        <tr><th>
            <label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" value="Lennon" id="id_last_name" />
        </td></tr>
        <tr><th>
            <label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" value="1940-10-9" id="id_birthday" />
        </td></tr>
        """)
        self.assertHTMLEqual(
            """<!DOCTYPE html>
            <html>
            <head>
                <link rel="stylesheet">
                <title>Document</title>
                <meta attribute="value">
            </head>
            <body>
                <p>
                This is a valid paragraph
                <div> this is a div AFTER the p</div>
            </body>
            </html>""", """
            <html>
            <head>
                <link rel="stylesheet">
                <title>Document</title>
                <meta attribute="value">
            </head>
            <body>
                <p> This is a valid paragraph
                <!-- browsers would close the p tag here -->
                <div> this is a div AFTER the p</div>
                </p> <!-- this is invalid HTML parsing, but it should make no
                difference in most cases -->
            </body>
            </html>""")
    def test_html_contain(self):
        # equal html contains each other
        dom1 = parse_html('<p>foo')
        dom2 = parse_html('<p>foo</p>')
        self.assertTrue(dom1 in dom2)
        self.assertTrue(dom2 in dom1)
        dom2 = parse_html('<div><p>foo</p></div>')
        self.assertTrue(dom1 in dom2)
        self.assertTrue(dom2 not in dom1)
        # Raw strings are matched as text, not re-parsed as markup.
        self.assertFalse('<p>foo</p>' in dom2)
        self.assertTrue('foo' in dom2)
        # when a root element is used ...
        dom1 = parse_html('<p>foo</p><p>bar</p>')
        dom2 = parse_html('<p>foo</p><p>bar</p>')
        self.assertTrue(dom1 in dom2)
        dom1 = parse_html('<p>foo</p>')
        self.assertTrue(dom1 in dom2)
        dom1 = parse_html('<p>bar</p>')
        self.assertTrue(dom1 in dom2)
    def test_count(self):
        # equal html contains each other one time
        dom1 = parse_html('<p>foo')
        dom2 = parse_html('<p>foo</p>')
        self.assertEqual(dom1.count(dom2), 1)
        self.assertEqual(dom2.count(dom1), 1)
        dom2 = parse_html('<p>foo</p><p>bar</p>')
        self.assertEqual(dom2.count(dom1), 1)
        dom2 = parse_html('<p>foo foo</p><p>foo</p>')
        self.assertEqual(dom2.count('foo'), 3)
        # Tag names, attribute names and attribute values do not count
        # as text content.
        dom2 = parse_html('<p class="bar">foo</p>')
        self.assertEqual(dom2.count('bar'), 0)
        self.assertEqual(dom2.count('class'), 0)
        self.assertEqual(dom2.count('p'), 0)
        self.assertEqual(dom2.count('o'), 2)
        dom2 = parse_html('<p>foo</p><p>foo</p>')
        self.assertEqual(dom2.count(dom1), 2)
        dom2 = parse_html('<div><p>foo<input type=""></p><p>foo</p></div>')
        self.assertEqual(dom2.count(dom1), 1)
        dom2 = parse_html('<div><div><p>foo</p></div></div>')
        self.assertEqual(dom2.count(dom1), 1)
        dom2 = parse_html('<p>foo<p>foo</p></p>')
        self.assertEqual(dom2.count(dom1), 1)
        dom2 = parse_html('<p>foo<p>bar</p></p>')
        self.assertEqual(dom2.count(dom1), 0)
    def test_parsing_errors(self):
        # Unparseable input turns into an assertion failure for the
        # assert* helpers, but parse_html itself raises HTMLParseError.
        with self.assertRaises(AssertionError):
            self.assertHTMLEqual('<p>', '')
        with self.assertRaises(AssertionError):
            self.assertHTMLEqual('', '<p>')
        with self.assertRaises(HTMLParseError):
            parse_html('</p>')
    def test_contains_html(self):
        response = HttpResponse('''<body>
        This is a form: <form action="" method="get">
            <input type="text" name="Hello" />
        </form></body>''')
        # html=True switches assertContains to semantic HTML matching.
        self.assertNotContains(response, "<input name='Hello' type='text'>")
        self.assertContains(response, '<form action="" method="get">')
        self.assertContains(response, "<input name='Hello' type='text'>", html=True)
        self.assertNotContains(response, '<form action="" method="get">', html=True)
        invalid_response = HttpResponse('''<body <bad>>''')
        with self.assertRaises(AssertionError):
            self.assertContains(invalid_response, '<p></p>')
        with self.assertRaises(AssertionError):
            self.assertContains(response, '<p "whats" that>')
    def test_unicode_handling(self):
        response = HttpResponse('<p class="help">Some help text for the title (with unicode ŠĐĆŽćžšđ)</p>')
        self.assertContains(response, '<p class="help">Some help text for the title (with unicode ŠĐĆŽćžšđ)</p>', html=True)
class XMLEqualTests(TestCase):
    """Checks the semantics of assertXMLEqual / assertXMLNotEqual."""
    def test_simple_equal(self):
        # Identical documents compare equal.
        first = "<elem attr1='a' attr2='b' />"
        second = "<elem attr1='a' attr2='b' />"
        self.assertXMLEqual(first, second)
    def test_simple_equal_unordered(self):
        # Attribute order is irrelevant to XML equality.
        first = "<elem attr1='a' attr2='b' />"
        second = "<elem attr2='b' attr1='a' />"
        self.assertXMLEqual(first, second)
    def test_simple_equal_raise(self):
        # A missing attribute makes the documents unequal.
        first = "<elem attr1='a' />"
        second = "<elem attr2='b' attr1='a' />"
        with self.assertRaises(AssertionError):
            self.assertXMLEqual(first, second)
    def test_simple_not_equal(self):
        # Differing attribute values are detected as not equal.
        first = "<elem attr1='a' attr2='c' />"
        second = "<elem attr1='a' attr2='b' />"
        self.assertXMLNotEqual(first, second)
    def test_simple_not_equal_raise(self):
        # Documents differing only in attribute order are equal, so
        # assertXMLNotEqual must fail.
        first = "<elem attr1='a' attr2='b' />"
        second = "<elem attr2='b' attr1='a' />"
        with self.assertRaises(AssertionError):
            self.assertXMLNotEqual(first, second)
    def test_parsing_errors(self):
        # Malformed XML surfaces as an assertion failure, not a parse error.
        malformed = "<elem attr1='a attr2='b' />"
        wellformed = "<elem attr2='b' attr1='a' />"
        with self.assertRaises(AssertionError):
            self.assertXMLNotEqual(malformed, wellformed)
    def test_comment_root(self):
        # Comments before the root element are ignored.
        first = "<?xml version='1.0'?><!-- comment1 --><elem attr1='a' attr2='b' />"
        second = "<?xml version='1.0'?><!-- comment2 --><elem attr2='b' attr1='a' />"
        self.assertXMLEqual(first, second)
class SkippingExtraTests(TestCase):
    # Verifies fixtures are not loaded for skipped tests; loading is
    # detected by counting SQL queries around the test run.
    fixtures = ['should_not_be_loaded.json']
    # HACK: This depends on internals of our TestCase subclasses
    def __call__(self, result=None):
        # Detect fixture loading by counting SQL queries, should be zero
        with self.assertNumQueries(0):
            super(SkippingExtraTests, self).__call__(result)
    @skip("Fixture loading should not be performed for skipped tests.")
    def test_fixtures_are_skipped(self):
        pass
class AssertRaisesMsgTest(SimpleTestCase):
    def test_special_re_chars(self):
        """assertRaisesMessage shouldn't interpret RE special chars."""
        # The expected message is full of regex metacharacters; it must
        # be matched literally.
        message = "[.*x+]y?"
        def raiser():
            raise ValueError(message)
        self.assertRaisesMessage(ValueError, message, raiser)
class AssertFieldOutputTests(SimpleTestCase):
    # assertFieldOutput maps valid inputs to expected cleaned values and
    # invalid inputs to expected error-message lists.
    def test_assert_field_output(self):
        error_invalid = ['Enter a valid email address.']
        # Exact match is required: extra errors, a wrong cleaned value or
        # a wrong message must each fail.
        self.assertFieldOutput(EmailField, {'a@a.com': 'a@a.com'}, {'aaa': error_invalid})
        self.assertRaises(AssertionError, self.assertFieldOutput, EmailField, {'a@a.com': 'a@a.com'}, {'aaa': error_invalid + ['Another error']})
        self.assertRaises(AssertionError, self.assertFieldOutput, EmailField, {'a@a.com': 'Wrong output'}, {'aaa': error_invalid})
        self.assertRaises(AssertionError, self.assertFieldOutput, EmailField, {'a@a.com': 'a@a.com'}, {'aaa': ['Come on, gimme some well formatted data, dude.']})
    def test_custom_required_message(self):
        # Custom default_error_messages are honoured for the empty value.
        class MyCustomField(IntegerField):
            default_error_messages = {
                'required': 'This is really required.',
            }
        self.assertFieldOutput(MyCustomField, {}, {}, empty_value=None)
class DoctestNormalizerTest(IgnorePendingDeprecationWarningsMixin, SimpleTestCase):
    # Runs the doctest suite in test_utils.doctest_output; the output
    # normalizer must make every example pass (zero failures).
    def test_normalizer(self):
        from django.test.simple import make_doctest
        suite = make_doctest("test_utils.doctest_output")
        # Discard runner output; only the failure list matters.
        failures = unittest.TextTestRunner(stream=six.StringIO()).run(suite)
        self.assertEqual(failures.failures, [])
| bsd-3-clause |
roxana-lafuente/MTTT | main.py | 2 | 52121 | """@brief Main module of TTT."""
# !/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
#
# PyKeylogger: TTT for Linux and Windows
# Copyright (C) 2016 Roxana Lafuente <roxana.lafuente@gmail.com>
# Miguel Lemos <miguelemosreverte@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# os is one of the modules that I know comes with 2.7, no questions asked.
import os
try:
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from gi.repository import Gdk
gi.require_version('WebKit', '3.0')
from gi.repository import WebKit
except ImportError:
print "Dependency unfulfilled, please install gi library"
exit(1)
def install_and_import(package):
    """@brief Imports a module by name, installing it with pip if missing.

    On success the module is bound into this module's globals under
    *package*, mirroring a plain ``import package`` statement.

    :param package: importable module/distribution name (str).
    """
    import importlib
    try:
        importlib.import_module(package)
    except ImportError:
        try:
            import pip  # noqa: F401 -- only probing that pip exists
        except ImportError:
            print("no pip")
            # Bootstrap pip itself, then retry the probe import.
            os.system('python get_pip.py')
            import pip  # noqa: F401
        # pip.main() was removed in pip >= 10; invoke pip as a subprocess
        # against the current interpreter instead.
        import subprocess
        import sys
        subprocess.call([sys.executable, '-m', 'pip', 'install', package])
    finally:
        # Expose the module at module level, like a regular import.
        globals()[package] = importlib.import_module(package)
# these other ones I a am not so sure of. Thus the install function.
install_and_import("subprocess")
install_and_import("json")
install_and_import("sys")
install_and_import("time")
install_and_import("shutil")
install_and_import("urlparse")
install_and_import("itertools")
install_and_import("webbrowser")
from commands import *
from files_processing import *
from evaluation import *
from post_editing import PostEditing
from constants import moses_dir_fn, is_valid_dir, is_valid_file, languages
UI_INFO = """
<ui>
<menubar name='MenuBar'>
<menu action='VisualsMenu'>
<menu action='Visuals'>
<menuitem action='metro'/>
<menuitem action='paper'/>
<separator />
<menuitem action='lights_on_option'/>
</menu>
</menu>
</menubar>
</ui>
"""
class MyWindow(Gtk.Window):
"""@brief Main window class."""
    def __init__(self):
        """@brief Initializes the main window of TTT.

        Detects the host OS, loads/creates the moses directory config
        file, then builds the menu bar and the notebook tabs.
        """
        # Recognize OS
        if os.name == 'posix': # Linux
            self.is_linux, self.is_windows = True, False
        elif os.name == 'nt': # Windows
            self.is_linux, self.is_windows = False, True
        else:
            print "Unknown OS"
            exit(1)
        # Check Moses Config file.
        self.moses_dir = ""
        self.output_directory = ""
        try:
            f = open(moses_dir_fn, 'r')
            self.moses_dir = f.read()
            f.close()
        except (IOError, OSError):
            # File does not exist. We create it.
            self.moses_dir = self.get_moses_dir()
            f = open(moses_dir_fn, 'w')
            f.write(self.moses_dir)
            f.close()
        else:
            if not self.is_moses_dir_valid(self.moses_dir):
                # File content is wrong
                # NOTE(review): this assigns a *local* moses_dir; the write
                # below persists self.moses_dir, which get_moses_dir() sets
                # internally -- confirm the local is intentional.
                moses_dir = self.get_moses_dir()
                f = open(moses_dir_fn, 'w')
                f.write(self.moses_dir)
                f.close()
        # Main title
        Gtk.Window.__init__(self, title="Translators' Training Tool")
        self.connect('destroy', self.final_responsabilities)
        self.set_border_width(3)
        # Toolbar initialization
        action_group = Gtk.ActionGroup("my_actions")
        self.add_choices_menu_actions(action_group)
        uimanager = self.create_ui_manager()
        uimanager.insert_action_group(action_group)
        menubar = uimanager.get_widget("/MenuBar")
        box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        box.pack_start(menubar, False, False, 0)
        self.gtk_theme = "paper"
        self.lightsOption = "gtk"
        # Set notebook for tabs
        self.notebook = Gtk.Notebook()
        box.pack_start(self.notebook, True, True, 0)
        self.add(box)
        # Add tabs to the notebook
        self.is_corpus_preparation_ready = False
        # Corpus Preparation tab
        self._set_corpus_preparation()
        # LM & MT Training tab
        self._set_training()
        # Translation tab
        self._set_translation()
        # Evaluation tab
        self._set_evaluation()
        # Post Editing tab
        # self.init_persistent_post_editing_state()
        # self._set_post_editing()
        # Init
        self.source_lang = None
        self.target_lang = None
        self.original_directory = os.getcwd()
        # Open on the Translation tab (index 2) by default.
        self.notebook.set_current_page(2)
def _check_moses_installation(self, directory):
"""@brief Determines if directory contains moses."""
# TODO: TRY catch OSError when permission denied!!
file_content = [f for f in os.listdir(directory)]
moses_files = [
"/scripts/tokenizer/tokenizer.perl",
"/scripts/recaser/truecase.perl",
"/scripts/training/clean-corpus-n.perl",
"/bin/lmplz",
"/bin/build_binary",
"/scripts/training/train-model.perl",
"/bin/moses"
]
if self.is_windows:
moses_files = [f.replace("/", "\\")
for f in moses_files]
moses_files = [f + ".exe"
for f in moses_files
if "/bin" in f]
is_valid = True
for mfile in moses_files:
is_valid = is_valid and os.path.isfile(directory + mfile)
return is_valid
def is_moses_dir_valid(self, directory):
"""@brief Determines if it contains a valid moses installation."""
is_valid = True
if directory == "":
is_valid = False # Empty string
elif not os.path.exists(directory):
is_valid = False # Directory does not exist
else:
# Check if dir exists but does not contain moses installation
is_valid = self._check_moses_installation(directory)
return is_valid
    def get_moses_dir(self):
        """@brief Gets Moses directory.

        Repeatedly shows a modal dialog asking for the moses installation
        directory until a valid one is entered. Terminates the program
        (exit(1)) if the user cancels.

        :return: validated moses installation directory (str).
        """
        directory = self.moses_dir
        response = Gtk.ResponseType.ACCEPT
        # Loop while the user keeps accepting but the path is invalid.
        while response == Gtk.ResponseType.ACCEPT and not self.is_moses_dir_valid(directory):
            label = Gtk.Label("Enter MOSES installation directory")
            entry = Gtk.Entry()
            button = Gtk.Button("Choose File")
            button.connect("clicked", self._on_dir_clicked, entry)
            dialog = Gtk.Dialog("Moses configuration",
                                None,
                                Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
                                (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
                                 Gtk.STOCK_OK, Gtk.ResponseType.ACCEPT))
            box = dialog.get_content_area()
            box.add(label)
            box.add(entry)
            box.add(button)
            label.show()
            entry.show()
            button.show()
            response = dialog.run()
            directory = entry.get_text()
            dialog.destroy()
        # If it is not valid, keep asking until valid or user leaves.
        if response != Gtk.ResponseType.ACCEPT:
            exit(1)
        self.moses_dir = directory
        return directory
    def _on_languages_combo_changed(self, combo, attribute):
        """@brief Gets the SL and TL from combo box.

        :param combo: the Gtk.ComboBoxText the user changed.
        :param attribute: "ST" stores the pick as the source language,
                          "TT" as the target language; others are ignored.
        """
        if attribute == "ST":
            self.source_lang = combo.get_active_text()
        elif attribute == "TT":
            self.target_lang = combo.get_active_text()
    def _set_corpus_preparation(self):
        """@brief GUI elements to run truecaser, tokenizer and cleaner.

        Builds the "Corpus preparation" notebook tab: language pickers,
        a results pane, file pickers for the translation-model corpora
        and the language-model text, the output-directory setting and
        the start button (wired to _prepare_corpus).
        """
        self.preparation = Gtk.VBox()
        grid = Gtk.Grid()
        inside_grid = Gtk.Grid()
        # Languages Frame.
        lang_frame = Gtk.Frame(label="Languages")
        # Source language picker
        stlang_label = Gtk.Label("Source text")
        inside_grid.add(stlang_label)
        self.stlang_box = Gtk.ComboBoxText()
        self.stlang_box.set_entry_text_column(0)
        self.stlang_box.connect("changed",
                                self._on_languages_combo_changed,
                                "ST")
        for language in languages:
            self.stlang_box.append_text(language)
        inside_grid.add(self.stlang_box)
        # Target language picker
        ttlang_label = Gtk.Label("Target text")
        inside_grid.attach_next_to(ttlang_label,
                                   stlang_label,
                                   Gtk.PositionType.BOTTOM,
                                   1,
                                   1)
        self.ttlang_box = Gtk.ComboBoxText()
        self.ttlang_box.set_entry_text_column(0)
        self.ttlang_box.connect("changed",
                                self._on_languages_combo_changed,
                                "TT")
        for language in languages:
            self.ttlang_box.append_text(language)
        inside_grid.attach_next_to(self.ttlang_box,
                                   self.stlang_box,
                                   Gtk.PositionType.BOTTOM,
                                   1,
                                   1)
        inside_grid.set_column_spacing(10)
        filler = Gtk.Label("    ")
        grid.attach(filler, 0, 0, 1, 1)
        lang_frame.add(inside_grid)
        grid.add(lang_frame)
        # Output frame: read-only text view showing preprocessing results.
        preprocess_results_frame = Gtk.Frame(label="Results")
        scrolledwindow = Gtk.ScrolledWindow()
        scrolledwindow.set_min_content_height(200)
        scrolledwindow.set_hexpand(True)
        scrolledwindow.set_vexpand(True)
        preprocessResultsText = Gtk.TextView()
        preprocessResultsText.set_editable(False)
        preprocessResultsText.set_cursor_visible(False)
        preprocessResultsText.set_wrap_mode(True)
        self.preprocessResultsTextBuffer = preprocessResultsText.get_buffer()
        scrolledwindow.add(preprocessResultsText)
        preprocess_results_frame.add(scrolledwindow)
        grid.attach_next_to(preprocess_results_frame,
                            lang_frame,
                            Gtk.PositionType.BOTTOM,
                            4, # number of columns the child will span
                            7) # number of rows the child will span
        # Translation Model Frame.
        inside_grid = Gtk.Grid()
        tm_frame = Gtk.Frame(label="Translation Model")
        # Translation Model: Source Text Picker
        st_label = Gtk.Label("Source text")
        inside_grid.add(st_label)
        self.st_train = Gtk.Entry()
        self.st_train.set_text("")
        inside_grid.add(self.st_train)
        self.st_button = Gtk.Button("Choose File")
        self.st_button.connect("clicked", self._on_file_clicked, self.st_train)
        inside_grid.add(self.st_button)
        # Translation Model: Target Text Picker
        tt_label = Gtk.Label("Target text")
        inside_grid.attach_next_to(tt_label,
                                   st_label,
                                   Gtk.PositionType.BOTTOM,
                                   1,
                                   10)
        self.tt_train = Gtk.Entry()
        self.tt_train.set_text("")
        inside_grid.attach_next_to(self.tt_train,
                                   self.st_train,
                                   Gtk.PositionType.BOTTOM,
                                   1,
                                   1)
        self.tt_button = Gtk.Button("Choose File")
        self.tt_button.connect("clicked", self._on_file_clicked, self.tt_train)
        inside_grid.attach_next_to(self.tt_button,
                                   self.st_button,
                                   Gtk.PositionType.BOTTOM,
                                   1,
                                   1)
        inside_grid.set_column_spacing(10)
        tm_frame.add(inside_grid)
        grid.add(tm_frame)
        # Language Model Frame.
        lm_frame = Gtk.Frame(label="Language Model")
        inside_grid = Gtk.Grid()
        inside_grid.add(Gtk.Label("Source text"))
        self.lm_text = Gtk.Entry()
        self.lm_text.set_text("")
        inside_grid.add(self.lm_text)
        self.lm_button = Gtk.Button("Choose File")
        self.lm_button.connect("clicked", self._on_file_clicked, self.lm_text)
        inside_grid.add(self.lm_button)
        lm_frame.add(inside_grid)
        grid.attach_next_to(lm_frame, tm_frame, Gtk.PositionType.BOTTOM, 1, 1)
        # Output directory.
        s_frame = Gtk.Frame(label="Settings")
        inside_grid = Gtk.Grid()
        inside_grid.add(Gtk.Label("Output directory"))
        self.language_model_directory_entry = Gtk.Entry()
        self.language_model_directory_entry.set_text("")
        inside_grid.add(self.language_model_directory_entry)
        self.s_button = Gtk.Button("Choose Directory")
        self.s_button.connect("clicked",
                              self._on_dir_clicked,
                              self.language_model_directory_entry,
                              "change output directory")
        inside_grid.add(self.s_button)
        inside_grid.set_row_spacing(10)
        inside_grid.set_column_spacing(10)
        s_frame.add(inside_grid)
        grid.attach_next_to(s_frame, lm_frame, Gtk.PositionType.BOTTOM, 1, 1)
        # Start corpus preprocessing button.
        sbutton = Gtk.Button(label="Start corpus preprocessing")
        sbutton.connect("clicked", self._prepare_corpus)
        # self.preparation.set_border_width(10)
        grid.attach_next_to(sbutton, s_frame, Gtk.PositionType.BOTTOM, 1, 1)
        grid.set_row_spacing(20)
        grid.set_column_spacing(20)
        # self.preparation.add(grid)
        self.preparation.pack_start(grid,
                                    expand=True,
                                    fill=True,
                                    padding=30)
        self.notebook.insert_page(self.preparation,
                                  Gtk.Label('Corpus preparation'), 0)
def _has_chosen_lang(self):
"""@brief Determines if source and target language have been chosen."""
return not self.source_lang is None and not self.target_lang is None
def _has_chosen_preprocess_params(self, language_model_directory):
    """@brief Determines if all data for preprocessing is ready.

    Checks, in order: the output directory is valid, both languages are
    chosen, and the target-training, source-training and language-model
    text files all exist.
    """
    # Single short-circuiting expression; evaluation order matches the
    # original step-by-step accumulation.
    return (is_valid_dir(language_model_directory)
            and self._has_chosen_lang()
            and is_valid_file(self.tt_train.get_text())
            and is_valid_file(self.st_train.get_text())
            and is_valid_file(self.lm_text.get_text()))
def _prepare_corpus(self, button):
    """@brief Runs moses truecaser, tokenizer and cleaner.

    Builds the full preprocessing command list (tokenize, truecaser
    training, truecase, clean) for the target text, the source text and
    the language-model text, runs the commands sequentially through the
    shell, and shows the accumulated stdout/stderr in the preprocess
    results buffer. Sets self.is_corpus_preparation_ready on success.
    """
    output = ""
    win_output_directory = self.language_model_directory_entry.get_text()
    if self._has_chosen_preprocess_params(win_output_directory):
        language_model_directory = adapt_path_for_cygwin(self.is_windows,
                                                         win_output_directory)
        # Change directory to the language_model_directory.
        try:
            os.chdir(win_output_directory)
        except:
            # Output directory does not exist.
            # NOTE(review): the bare except also swallows permission
            # errors, in which case mkdir below raises anyway -- consider
            # narrowing to OSError.
            os.mkdir(win_output_directory)
            os.chdir(win_output_directory)
        cmds = []
        # 1) Tokenization
        # a) Target text
        self.target_tok = generate_input_tok_fn(self.target_lang,
                                                language_model_directory)
        cmds.append(get_tokenize_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
                                         self.target_lang,
                                         adapt_path_for_cygwin(self.is_windows, self.tt_train.get_text()),
                                         self.target_tok))
        # b) Source text
        self.source_tok = generate_input_tok_fn(self.source_lang,
                                                language_model_directory)
        cmds.append(get_tokenize_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
                                         self.source_lang,
                                         adapt_path_for_cygwin(self.is_windows, self.st_train.get_text()),
                                         self.source_tok))
        # c) Language model
        # NOTE(review): the LM text is tokenized with self.source_lang
        # here, but the language model is trained on the target side in
        # _train() -- confirm the intended language.
        self.lm_tok = generate_lm_tok_fn(language_model_directory)
        cmds.append(get_tokenize_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
                                         self.source_lang,
                                         adapt_path_for_cygwin(self.is_windows, self.lm_text.get_text()),
                                         self.lm_tok))
        # 2) Truecaser training
        # a) Target text
        cmds.append(get_truecaser_train_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
                                                self.output_directory,
                                                self.target_lang,
                                                self.target_tok))
        # b) Source text
        cmds.append(get_truecaser_train_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
                                                self.output_directory,
                                                self.source_lang,
                                                self.source_tok))
        # c) Language model
        cmds.append(get_truecaser_train_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
                                                self.output_directory,
                                                self.target_lang,
                                                self.lm_tok))
        # 3) Truecaser
        self.input_true = language_model_directory + "/input.true"
        # a) Target text
        self.target_true = generate_input_true_fn(self.target_lang,
                                                  language_model_directory)
        cmds.append(get_truecaser_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
                                          self.output_directory,
                                          self.target_lang,
                                          self.target_tok,
                                          self.target_true))
        # b) Source text
        self.source_true = generate_input_true_fn(self.source_lang,
                                                  language_model_directory)
        cmds.append(get_truecaser_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
                                          self.output_directory,
                                          self.source_lang,
                                          self.source_tok,
                                          self.source_true))
        # c) Language model
        # NOTE(review): this truecases self.target_tok into self.lm_true;
        # the LM input is presumably self.lm_tok (tokenized LM text from
        # step 1c) -- verify before relying on the produced file.
        self.lm_true = generate_lm_true_fn(language_model_directory)
        cmds.append(get_truecaser_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
                                          self.output_directory,
                                          self.target_lang,
                                          self.target_tok, self.lm_true))
        # 4) Cleaner
        # a) Target text
        self.input_clean = generate_input_clean_fn(language_model_directory)
        self.source_clean = self.input_true + "." + self.source_lang
        self.target_clean = self.input_true + "." + self.target_lang
        cmds.append(get_cleaner_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
                                        self.source_lang,
                                        self.target_lang,
                                        self.input_true,
                                        self.input_clean))
        # Start threads
        all_ok = True
        for cmd in cmds:
            output += "Running command: %s" % cmd
            proc = subprocess.Popen([cmd],
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    shell=True)
            # NOTE(review): wait() before communicate() can deadlock when
            # a command produces enough output to fill the PIPE buffers.
            all_ok = all_ok and (proc.wait() == 0)
            out, err = proc.communicate()
            output += "Output: %s\n%s\n\n\n" % (out, err)
        if all_ok:
            self.is_corpus_preparation_ready = True
    else:
        output += "ERROR. You need to complete all fields."
    self.preprocessResultsTextBuffer.set_text(output)
    # Restore the working directory changed above.
    os.chdir(self.original_directory)
def _on_file_clicked(self, widget, labelToUpdate, tab_name = "undefined"):
    """@brief Get file path from dialog.

    Opens a file chooser and writes the selected path into
    *labelToUpdate* (cleared on Cancel). When invoked from the
    "Machine translation" tab, also remembers the file's directory in
    self.mt_out_text.
    """
    dialog = Gtk.FileChooserDialog("Please choose a file", None,
                                   Gtk.FileChooserAction.OPEN,
                                   (Gtk.STOCK_CANCEL,
                                    Gtk.ResponseType.CANCEL,
                                    Gtk.STOCK_OPEN, Gtk.ResponseType.OK))
    self._add_file_filters(dialog)
    response = dialog.run()
    if response == Gtk.ResponseType.OK:
        labelToUpdate.set_text(dialog.get_filename())
    elif response == Gtk.ResponseType.CANCEL:
        labelToUpdate.set_text("")
    # BUGFIX: get_filename() can return None (e.g. after Cancel with no
    # selection), which used to crash os.path.dirname(); only record the
    # directory when a file was actually selected.
    if tab_name == "Machine translation" and dialog.get_filename():
        self.mt_out_text = os.path.dirname(dialog.get_filename())
    dialog.destroy()
def _on_dir_clicked(self, widget, labelToUpdate, command = ""):
    """@brief Get folder path from dialog.

    Opens a folder chooser and writes the selected directory into
    *labelToUpdate* (cleared on Cancel). The *command* string triggers
    side effects: "change output directory" also updates
    self.output_directory and the post-editing / evaluation output
    entries; the longer variant additionally rebuilds the post-edition
    table.
    """
    dialog = Gtk.FileChooserDialog("Please choose a directory", None,
                                   Gtk.FileChooserAction.OPEN,
                                   (Gtk.STOCK_CANCEL,
                                    Gtk.ResponseType.CANCEL,
                                    Gtk.STOCK_OPEN, Gtk.ResponseType.OK))
    self._add_dir_filters(dialog)
    # Switch to folder selection (dialog was created in OPEN mode above).
    dialog.set_action(Gtk.FileChooserAction.SELECT_FOLDER)
    response = dialog.run()
    if response == Gtk.ResponseType.OK:
        labelToUpdate.set_text(dialog.get_filename())
    elif response == Gtk.ResponseType.CANCEL:
        labelToUpdate.set_text("")
    # NOTE(review): get_filename() may be None after Cancel, in which case
    # output_directory is set to None and set_text(None) may fail -- verify.
    if "change output directory" in command:
        self.output_directory = dialog.get_filename()
        self.post_editing_output.set_text(self.output_directory)
        self.evaluation_output.set_text(self.output_directory)
    dialog.destroy()
    if command == "change output directory and maybe create post edition table":
        self._check_if_both_files_are_choosen_post_edition(None, "")
def _add_dir_filters(self, dialog):
    """@brief Add folder filters for folder choosing.

    Currently only installs a catch-all "Any files" filter.
    """
    # TODO: Allow to only choose folders
    # The unused `filter_text` placeholder was removed.
    filter_any = Gtk.FileFilter()
    filter_any.set_name("Any files")
    filter_any.add_pattern("*")
    dialog.add_filter(filter_any)
def _add_file_filters(self, dialog):
    """@brief Add file filters for file choosing.

    Currently only installs a catch-all "Any files" filter.
    """
    # The unused `filter_text` placeholder was removed.
    filter_any = Gtk.FileFilter()
    filter_any.set_name("Any files")
    filter_any.add_pattern("*")
    dialog.add_filter(filter_any)
def _set_training(self):
    """@brief Prepares GUI to run MT and LM training.

    Builds the "Training" notebook page: a start button wired to
    self._train and a read-only results view whose buffer is kept in
    self.trainingResultsTextBuffer.
    """
    self.training = Gtk.Box()
    grid = Gtk.Grid()
    # Start training button.
    self.start_training_button = Gtk.Button("Start training")
    self.start_training_button.connect("clicked", self._train)
    grid.add(self.start_training_button)
    # Output frame.
    training_results_frame = Gtk.Frame(label="Results")
    scrolledwindow = Gtk.ScrolledWindow()
    scrolledwindow.set_hexpand(True)
    scrolledwindow.set_vexpand(True)
    resultsText = Gtk.TextView()
    resultsText.set_editable(False)
    resultsText.set_cursor_visible(False)
    resultsText.set_wrap_mode(True)
    # Buffer kept so _train() can post its log output.
    self.trainingResultsTextBuffer = resultsText.get_buffer()
    scrolledwindow.add(resultsText)
    training_results_frame.add(scrolledwindow)
    grid.attach_next_to(training_results_frame,
                        self.start_training_button,
                        Gtk.PositionType.BOTTOM,
                        1,
                        1)
    self.training.add(grid)
    # Page index 1: after "Corpus preparation".
    self.notebook.insert_page(self.training, Gtk.Label('Training'), 1)
def _train(self, button):
    """@brief Runs MT and LM training.

    Requires a valid output directory and a completed corpus preparation
    step. Queues three shell commands (LM training, ARPA binarization,
    translation-model training), runs them sequentially and shows the
    combined log plus the contents of training.out in the results buffer.
    """
    language_model_directory = adapt_path_for_cygwin(self.is_windows,
                                                     self.language_model_directory_entry.get_text())
    if is_valid_dir(language_model_directory) and self.is_corpus_preparation_ready:
        os.chdir(self.language_model_directory_entry.get_text())
        cmds = []
        output = "Log:\n\n"
        # Train the language model.
        # NOTE(review): unlike _prepare_corpus, moses_dir is passed here
        # without adapt_path_for_cygwin -- confirm this is intended on
        # Windows.
        self.lm_arpa = generate_lm_fn(language_model_directory)
        cmds.append(get_lmtrain_command(self.moses_dir,
                                        self.target_lang,
                                        self.lm_true,
                                        self.lm_arpa))
        # Binarize arpa
        self.blm = generate_blm_fn(language_model_directory)
        cmds.append(get_blmtrain_command(self.moses_dir,
                                         self.target_lang,
                                         self.lm_arpa,
                                         self.blm))
        self.trainingResultsTextBuffer.set_text(output)
        # Train the translation model.
        out_file = generate_tm_fn(language_model_directory)
        cmds.append(get_tmtrain_command(self.moses_dir,
                                        self.source_lang,
                                        self.target_lang,
                                        self.blm,
                                        self.input_clean,
                                        language_model_directory))
        # TODO!
        # Binarize phase-table.gz
        # Binarize reordering-table.wbe-msd-bidirectional-fe.gz
        # Change PhraseDictionaryMemory to PhraseDictionaryCompact
        # Set the path of the PhraseDictionary feature to point to:
        #     $HOME/working/binarised-model/phrase-table.minphr
        # Set the path of the LexicalReordering feature to point to:
        #     $HOME/working/binarised-model/reordering-table
        for cmd in cmds:
            # use Popen for non-blocking
            output += cmd
            proc = subprocess.Popen([cmd],
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    shell=True)
            proc.wait()
            (out, err) = proc.communicate()
            # NOTE(review): under Python 3 `out`/`err` are bytes, so the
            # `!= ""` comparisons are always True -- this file targets
            # Python 2 (see the print statement in _evaluate).
            if out != "":
                output += out
            elif err != "":
                output += err
        # Adding output from training.out
        training = adapt_path_for_cygwin(self.is_windows, self.language_model_directory_entry.get_text()) + "/training.out"
        try:
            with open(training, "r") as f:
                output += "\n" + f.read()
        except IOError:
            output += "Error. Unable to create moses.ini"
        # Set output to the output label.
        self.trainingResultsTextBuffer.set_text(output)
    else:
        output = "ERROR: Uncompleted preprocessing. "
        output += "Please go to the first tab and complete the process."
        self.trainingResultsTextBuffer.set_text(output)
    # Restore the working directory changed above.
    os.chdir(self.original_directory)
def _set_translation(self):
    """@brief Prepares GUI for running the decoder.

    Builds the "Machine Translation" notebook page: input-file picker,
    model chooser/creator buttons, a start button wired to
    self._machine_translation, and a read-only results view whose buffer
    is kept in self.mttrainingResultsTextBuffer.
    """
    self.translation = Gtk.Box()
    # Machine Translation Frame.
    grid = Gtk.Grid()
    inside_grid = Gtk.Grid()
    mt_frame = Gtk.Frame(label="Machine translation")
    # Source Text Picker
    mt_in_label = Gtk.Label("Source text file")
    inside_grid.add(mt_in_label)
    self.mt_in_text = Gtk.Entry()
    self.mt_in_text.set_text("")
    inside_grid.add(self.mt_in_text)
    self.mt_in_button = Gtk.Button("Choose File")
    # The extra "Machine translation" argument makes _on_file_clicked
    # record the chosen file's directory in self.mt_out_text.
    self.mt_in_button.connect("clicked",
                              self._on_file_clicked,
                              self.mt_in_text,
                              "Machine translation")
    inside_grid.add(self.mt_in_button)
    self.mt_out_text = ""
    self.mt_out2_button = Gtk.Button("Choose a Model")
    self.mt_out2_button.connect("clicked",
                                self._on_dir_clicked,
                                self.language_model_directory_entry)
    inside_grid.attach_next_to(self.mt_out2_button,
                               self.mt_in_button,
                               Gtk.PositionType.RIGHT,
                               1,
                               50)
    self.mt_out3_button = Gtk.Button("Create a Model")
    # "Create a Model" simply jumps back to the corpus-preparation tab.
    self.mt_out3_button.connect("clicked",
                                self._create_model,
                                self.mt_out_text)
    inside_grid.attach_next_to(self.mt_out3_button,
                               self.mt_out2_button,
                               Gtk.PositionType.RIGHT,
                               1,
                               50)
    # Start machine translation button.
    sbutton = Gtk.Button(label="Start machine translation")
    sbutton.connect("clicked", self._machine_translation)
    inside_grid.attach_next_to(sbutton,
                               self.mt_in_button,
                               Gtk.PositionType.BOTTOM,
                               1,
                               10)
    mt_frame.add(inside_grid)
    grid.add(mt_frame)
    # Output label.
    mt_training_results_frame = Gtk.Frame(label="Results")
    mtscrolledwindow = Gtk.ScrolledWindow()
    mtscrolledwindow.set_hexpand(True)
    mtscrolledwindow.set_vexpand(True)
    mtresultsText = Gtk.TextView()
    mtresultsText.set_editable(False)
    mtresultsText.set_cursor_visible(False)
    mtresultsText.set_wrap_mode(True)
    # Buffer kept so _machine_translation() can post its output.
    self.mttrainingResultsTextBuffer = mtresultsText.get_buffer()
    mtscrolledwindow.add(mtresultsText)
    mt_training_results_frame.add(mtscrolledwindow)
    grid.attach_next_to(mt_training_results_frame,
                        mt_frame,
                        Gtk.PositionType.BOTTOM,
                        1,
                        1)
    self.translation.add(grid)
    # Page index 2: after "Training".
    self.notebook.insert_page(self.translation,
                              Gtk.Label('Machine Translation'),2)
def _create_model(self, a, b):
    """@brief Jumps to the corpus-preparation tab (page 0) so the user
    can build a new model; the widget and user-data arguments are unused."""
    self.notebook.set_current_page(0)
def _has_empty_last_line(self, fn):
"""@brief Determines if last line of file is empty."""
last_line_is_empty = False
try:
with open(fn, 'r') as f:
last_line_is_empty = "\n" in (f.readlines()[-1])
except Exception as e:
last_line_is_empty = False
return last_line_is_empty
def _machine_translation(self, button):
    """@brief Runs the decoder.

    Validates the chosen input file (must exist and end with a newline),
    derives the translated-output path next to it, runs the moses decoder
    via a shell command and shows either the translation or the decoder's
    stdout/stderr in the results buffer.
    """
    output = ""
    in_file = self.mt_in_text.get_text()
    if not is_valid_file(in_file):
        output = "ERROR: %s should be a valid file." % in_file
    elif not self._has_empty_last_line(in_file):
        output = "ERROR: %s lacks an empty line at the end of the file." % in_file
    else:
        base = os.path.basename(in_file)
        # BUGFIX: the directory and file name used to be concatenated
        # without a path separator, producing e.g. "/a/bin_translated.txt"
        # instead of "/a/b/in_translated.txt". os.path.join inserts it.
        out_file = os.path.join(
            os.path.dirname(in_file),
            os.path.splitext(base)[0] + "_translated" + os.path.splitext(base)[1])
        in_file = adapt_path_for_cygwin(self.is_windows, in_file)
        out_file = adapt_path_for_cygwin(self.is_windows, out_file)
        output += "Running decoder....\n\n"
        lmdir = self.language_model_directory_entry.get_text()
        if is_valid_dir(lmdir):
            # Run the decoder.
            cmd = get_test_command(self.moses_dir,
                                   adapt_path_for_cygwin(self.is_windows, lmdir) + "/train/model/moses.ini",
                                   in_file,
                                   out_file)
            # use Popen for non-blocking
            proc = subprocess.Popen([cmd],
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    shell=True)
            (out, err) = proc.communicate()
            # `with` guarantees the handle is closed even if reading fails.
            with open(out_file, 'r') as f:
                mt_result = f.read()
            if mt_result == "":
                # Empty result: surface whatever the decoder printed.
                if out != "":
                    output += out
                elif err != "":
                    output += err
            else:
                output += "Best translation: " + mt_result
        else:
            output += "ERROR. You need to load or create a model first."
    # Set output to the output label.
    self.mttrainingResultsTextBuffer.set_text(output)
def _set_evaluation(self):
    """@brief Builds the "Evaluation" notebook page.

    Three sections: file pickers (source, reference, output directory),
    metric checkboxes (WER/PER/HTER/GTM and the BLEU family) with a start
    button wired to self._evaluate, and a read-only results view whose
    buffer is kept in self.resultsTextBuffer.
    """
    # NOTE(review): self.preparation is reused here (it also holds the
    # corpus-preparation page) -- the attribute ends up pointing at this
    # page; confirm nothing relies on the old reference.
    self.preparation = Gtk.VBox()
    grid = Gtk.Grid()
    # Evaluation Frame.
    inside_grid = Gtk.Grid()
    texts_menu_frame = Gtk.Frame(label="Evaluation")
    # Evaluation Metrics: Source Text Picker
    st_label = Gtk.Label("Source text")
    inside_grid.add(st_label)
    self.evaluation_source = Gtk.Entry()
    self.evaluation_source.set_text("")
    inside_grid.add(self.evaluation_source)
    self.st_button = Gtk.Button("Choose File")
    self.st_button.connect("clicked",
                           self._on_file_clicked,
                           self.evaluation_source)
    inside_grid.add(self.st_button)
    # Evaluation Metrics: Reference Text Picker
    tt_label = Gtk.Label("Reference text")
    inside_grid.attach_next_to(tt_label,
                               st_label,
                               Gtk.PositionType.BOTTOM, 1, 10)
    self.evaluation_reference = Gtk.Entry()
    self.evaluation_reference.set_text("")
    inside_grid.attach_next_to(self.evaluation_reference,
                               self.evaluation_source,
                               Gtk.PositionType.BOTTOM, 1, 10)
    self.tt_button = Gtk.Button("Choose File")
    self.tt_button.connect("clicked",
                           self._on_file_clicked,
                           self.evaluation_reference)
    inside_grid.attach_next_to(self.tt_button,
                               self.st_button,
                               Gtk.PositionType.BOTTOM, 1, 10)
    # Evaluation Metrics: Output Text Picker
    ot_label = Gtk.Label("Output Directory")
    inside_grid.attach_next_to(ot_label,
                               tt_label,
                               Gtk.PositionType.BOTTOM, 1, 10)
    self.evaluation_output = Gtk.Entry()
    self.evaluation_output.set_text("")
    inside_grid.attach_next_to(self.evaluation_output,
                               self.evaluation_reference,
                               Gtk.PositionType.BOTTOM, 1, 10)
    ot_button = Gtk.Button("Choose Directory")
    ot_button.connect("clicked",
                      self._on_dir_clicked,
                      self.evaluation_output,
                      "change output directory")
    inside_grid.attach_next_to(ot_button,
                               self.tt_button,
                               Gtk.PositionType.BOTTOM, 1, 10)
    inside_grid.set_column_spacing(10)
    texts_menu_frame.add(inside_grid)
    grid.add(texts_menu_frame)
    grid.set_row_spacing(1)
    grid.set_column_spacing(20)
    # Evaluation Metrics Frame.
    inside_grid = Gtk.Grid()
    buttons_frame = Gtk.Frame(label="Evaluation Metrics")
    # Evaluation Metrics: Evaluations Picker
    self.check_WER = Gtk.CheckButton.new_with_label("WER")
    self.check_PER = Gtk.CheckButton.new_with_label("PER")
    self.check_HTER = Gtk.CheckButton.new_with_label("HTER")
    self.check_GTM = Gtk.CheckButton.new_with_label("GTM")
    self.check_BLEU = Gtk.CheckButton.new_with_label("BLEU")
    self.check_BLEU2GRAM = Gtk.CheckButton.new_with_label("BLEU2GRAM")
    self.check_BLEU3GRAM = Gtk.CheckButton.new_with_label("BLEU3GRAM")
    self.check_BLEU4GRAM = Gtk.CheckButton.new_with_label("BLEU4GRAM")
    # Two rows of four checkboxes.
    inside_grid.add(self.check_WER)
    inside_grid.add(self.check_PER)
    inside_grid.add(self.check_HTER)
    inside_grid.add(self.check_GTM)
    inside_grid.attach_next_to(self.check_BLEU,
                               self.check_WER,
                               Gtk.PositionType.BOTTOM, 1, 1)
    inside_grid.attach_next_to(self.check_BLEU2GRAM,
                               self.check_PER,
                               Gtk.PositionType.BOTTOM, 1, 1)
    inside_grid.attach_next_to(self.check_BLEU3GRAM,
                               self.check_HTER,
                               Gtk.PositionType.BOTTOM, 1, 1)
    inside_grid.attach_next_to(self.check_BLEU4GRAM,
                               self.check_GTM,
                               Gtk.PositionType.BOTTOM, 1, 1)
    self.evaluate_button = Gtk.Button("Start evaluation ")
    self.evaluate_button.connect("clicked", self._evaluate)
    inside_grid.attach(self.evaluate_button, 0, 2, 3, 1)
    buttons_frame.add(inside_grid)
    grid.add(buttons_frame)
    # Evaluation: Results
    inside_grid = Gtk.Grid()
    evaluation_results_frame = Gtk.Frame(label="Results")
    scrolledwindow = Gtk.ScrolledWindow()
    scrolledwindow.set_hexpand(True)
    scrolledwindow.set_vexpand(True)
    resultsText = Gtk.TextView()
    resultsText.set_editable(False)
    resultsText.set_cursor_visible(False)
    resultsText.set_wrap_mode(True)
    # Buffer kept so _evaluate() can post results and error messages.
    self.resultsTextBuffer = resultsText.get_buffer()
    scrolledwindow.add(resultsText)
    evaluation_results_frame.add(scrolledwindow)
    grid.attach(evaluation_results_frame, 0, 1, 3, 1)
    self.preparation.pack_start(grid, expand=True, fill=True, padding=0)
    # Page index 3: after "Machine Translation".
    self.notebook.insert_page(self.preparation,
                              Gtk.Label('Evaluation'), 3)
def _evaluate(self, button):
    """@brief Runs the selected evaluation metrics.

    Validates that all three paths are filled in and exist, collects the
    metric checkbox states in the order expected by evaluate(), writes
    the result to <output dir>/evaluation_output.txt and shows it in the
    results buffer. On failure, shows the first applicable error message.
    """
    fields_filled = (self.evaluation_source.get_text()
                     and self.evaluation_reference.get_text()
                     and self.evaluation_output.get_text())
    files_exists = (is_valid_file(self.evaluation_source.get_text())
                    and is_valid_file(self.evaluation_reference.get_text())
                    and is_valid_dir(self.evaluation_output.get_text()))
    if fields_filled and files_exists:
        # Metric order expected by evaluate():
        # ["WER","PER","HTER","GTM","BLEU","BLEU2GRAM","BLEU3GRAM","BLEU4GRAM"]
        # The eight repetitive if-blocks were replaced by one comprehension.
        checkboxes = (self.check_WER, self.check_PER, self.check_HTER,
                      self.check_GTM, self.check_BLEU, self.check_BLEU2GRAM,
                      self.check_BLEU3GRAM, self.check_BLEU4GRAM)
        checkbox_indexes = [bool(cb.get_active()) for cb in checkboxes]
        result = evaluate(checkbox_indexes,
                          self.evaluation_source.get_text(),
                          self.evaluation_reference.get_text())
        evaluation_output_filename = self.evaluation_output.get_text()+"/evaluation_output.txt"
        # `with` replaces the manual open/close pair; the stray debug
        # print statement was removed.
        with open(evaluation_output_filename, 'w') as f:
            f.write(result)
        self.resultsTextBuffer.set_text(result)
    if not fields_filled:
        self.resultsTextBuffer.set_text("ERROR. You need to complete all fields.")
    if not files_exists:
        if not is_valid_file(self.evaluation_source.get_text()):
            self.resultsTextBuffer.set_text("ERROR. The evaluation source file does not exist.")
        if not is_valid_file(self.evaluation_reference.get_text()):
            self.resultsTextBuffer.set_text("ERROR. The evaluation reference file does not exist.")
        if not is_valid_dir(self.evaluation_output.get_text()):
            self.resultsTextBuffer.set_text("ERROR. The evaluation output directory is not choosen.")
def init_persistent_post_editing_state(self):
    """Reset the Post-Editing state that survives rebuilds of the tab:
    the remembered source/reference file paths and the bilingual flag."""
    self.post_editing_source_text, self.post_editing_reference_text = "", ""
    self.choosed_bilingual_post_editing_mode = False
def _set_post_editing(self):
    """@brief (Re)builds the "Post Editing" notebook page.

    Removes any previous page 4 and recreates it: MT-file picker, an
    optional source-file picker toggled by the "bilingual" checkbox, and
    an output-directory picker. The Entry widgets are re-populated from
    the persistent *_text attributes so the page survives rebuilds.
    """
    # Drop the old page before rebuilding (no-op on first call).
    self.notebook.remove_page(4)
    self.preparation = Gtk.VBox()
    self.postEdition_grid = Gtk.Grid()
    self.postEdition_grid.set_row_spacing(1)
    self.postEdition_grid.set_column_spacing(20)
    # Post Editing Frame.
    self.postEditing_file_menu_grid = Gtk.Grid()
    texts_menu_frame = Gtk.Frame(label="Post-Editing")
    # Post Editing : Source Text Picker
    self.post_editing_reference_label = Gtk.Label("Select MT file")
    self.postEditing_file_menu_grid.add(self.post_editing_reference_label)
    self.post_editing_reference = Gtk.Entry()
    # Restore the previously chosen path, if any.
    self.post_editing_reference.set_text(self.post_editing_reference_text)
    self.postEditing_file_menu_grid.add(self.post_editing_reference)
    self.post_editing_reference_button = Gtk.Button("Choose File")
    self.post_editing_reference_button.connect("clicked",
                                               self._on_file_clicked,
                                               self.post_editing_reference)
    self.postEditing_file_menu_grid.add(self.post_editing_reference_button)
    self.btn_check_bilingual = Gtk.CheckButton.new_with_label("bilingual")
    self.postEditing_file_menu_grid.attach_next_to(self.btn_check_bilingual, self.post_editing_reference_button, Gtk.PositionType.RIGHT, 1, 10)
    self.btn_check_bilingual.set_active(self.choosed_bilingual_post_editing_mode)
    self.btn_check_bilingual.connect("clicked", self.toggle_bilingual)
    self.post_editing_source_label = Gtk.Label("Select source file")
    self.postEditing_file_menu_grid.attach_next_to(self.post_editing_source_label, self.post_editing_reference_label, Gtk.PositionType.BOTTOM, 1, 10)
    self.post_editing_source = Gtk.Entry()
    self.post_editing_source.set_text(self.post_editing_source_text)
    self.postEditing_file_menu_grid.attach_next_to(self.post_editing_source, self.post_editing_reference, Gtk.PositionType.BOTTOM, 1, 10)
    self.post_editing_source_button = Gtk.Button("Choose File")
    self.post_editing_source_button.connect("clicked", self._on_file_clicked, self.post_editing_source)
    self.postEditing_file_menu_grid.attach_next_to(self.post_editing_source_button, self.post_editing_reference_button, Gtk.PositionType.BOTTOM, 1, 10)
    # Rebuild the PostEditing table as soon as both paths are available.
    self.post_editing_reference.connect("changed", self._check_if_both_files_are_choosen_post_edition, "reference")
    self.post_editing_source.connect("changed", self._check_if_both_files_are_choosen_post_edition, "source")
    # Post Editing: Output Text Picker
    ot_label = Gtk.Label("Output Directory")
    self.postEditing_file_menu_grid.attach_next_to(ot_label,
                                                   self.post_editing_source_label,
                                                   Gtk.PositionType.BOTTOM, 1, 10)
    self.post_editing_output = Gtk.Entry()
    self.post_editing_output.set_text(self.output_directory)
    self.postEditing_file_menu_grid.attach_next_to(self.post_editing_output,
                                                   self.post_editing_source,
                                                   Gtk.PositionType.BOTTOM, 1, 10)
    ot_button = Gtk.Button("Choose Directory")
    ot_button.connect("clicked",
                      self._on_dir_clicked,
                      self.post_editing_output,
                      "change output directory and maybe create post edition table")
    self.postEditing_file_menu_grid.attach_next_to(ot_button,
                                                   self.post_editing_source_button,
                                                   Gtk.PositionType.BOTTOM, 1, 10)
    self.postEdition_grid.add(self.postEditing_file_menu_grid)
    self.preparation.pack_start(self.postEdition_grid, expand=True, fill=True, padding =0)
    self.notebook.insert_page(self.preparation, Gtk.Label('Post Editing'), 4)
    # no_show_all keeps the source-file widgets hidden unless bilingual
    # mode is on; toggle_bilingual() applies the actual visibility below.
    # NOTE(review): the label line is repeated -- harmless but redundant.
    self.post_editing_source_label.set_no_show_all(True)
    self.post_editing_source.set_no_show_all(True)
    self.post_editing_source_button.set_no_show_all(True)
    self.post_editing_source_label.set_no_show_all(True)
    self.toggle_bilingual(None)
    self.notebook.show_all()
def toggle_bilingual(self, button):
    """@brief Shows or hides the source-file widgets according to the
    "bilingual" checkbox, and remembers the choice persistently."""
    visibility = self.btn_check_bilingual.get_active()
    self.choosed_bilingual_post_editing_mode = visibility
    # The label call was duplicated before and after the other two calls;
    # one call per widget is enough.
    self.post_editing_source_label.set_visible(visibility)
    self.post_editing_source.set_visible(visibility)
    self.post_editing_source_button.set_visible(visibility)
def _check_if_both_files_are_choosen_post_edition(self, object, file_type=""):
    """@brief "changed" handler for the post-editing file entries.

    Persists the entry text for the entry identified by *file_type*
    ("source" or "reference"). Once an output directory is set and either
    both files are chosen or bilingual mode is off, it rebuilds the
    Post-Editing page, switches to it and creates the PostEditing helper
    bound to the current files.
    """
    if file_type == "source": self.post_editing_source_text = self.post_editing_source.get_text()
    if file_type == "reference": self.post_editing_reference_text = self.post_editing_reference.get_text()
    if self.output_directory:
        if ((self.post_editing_source.get_text()
                and self.post_editing_reference.get_text())
                or not self.btn_check_bilingual.get_active()):
            # Capture the paths before _set_post_editing() recreates the
            # Entry widgets (which would reset their text).
            post_editing_source_text = self.post_editing_source.get_text()
            post_editing_reference_text = self.post_editing_reference.get_text()
            self._set_post_editing()
            self.notebook.set_current_page(4)
            # binding of the buttons events to the PostEditing methods
            self.PostEditing = PostEditing(
                post_editing_source_text,  # so that it can read the source file
                post_editing_reference_text,  # so that it can read the reference file
                self.notebook,  # so that it can add the diff tab when needed
                self.postEdition_grid,  # so that it can add search entry and table
                self.output_directory)  # so that it can save files on the output directory
def gtk_change_visuals(self, light_option="unchanged", theme="unchanged"):
    """@brief Loads and applies the application CSS theme.

    @param light_option "gtk", "gtk-dark" or "unchanged" (keep current).
    @param theme "metro", "paper" or "unchanged" (keep current).
    """
    # BUGFIX: the old check `MAJOR >= 3 and MINOR >= 14` wrongly rejects
    # e.g. Gtk 4.0; compare the version as a tuple instead.
    if (Gtk.MAJOR_VERSION, Gtk.MINOR_VERSION) >= (3, 14):
        # Only remember recognized values; "unchanged" keeps the
        # previous choice. (The unused css_filename local was removed.)
        if theme == "metro" or theme == "paper":
            self.gtk_theme = theme
        if light_option == "gtk" or light_option == "gtk-dark":
            self.lightsOption = light_option
        filename = 'gui/' + self.gtk_theme + '/' + self.lightsOption + '.css'
        with open(filename, 'r') as css:
            css_data = css.read()
        style_provider = Gtk.CssProvider()
        # NOTE(review): load_from_data expects bytes under Python 3 /
        # recent PyGObject -- confirm before porting this Python 2 file.
        style_provider.load_from_data(css_data)
        Gtk.StyleContext.add_provider_for_screen(
            Gdk.Screen.get_default(),
            style_provider,
            Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
        )
def add_choices_menu_actions(self, action_group):
    """@brief Populates *action_group* with the Preferences menu actions:
    the theme radio choices ("metro"/"paper", default "paper") and the
    lights-off toggle."""
    self.preferences_button = Gtk.Action("VisualsMenu", "Preferences", None, None)
    action_group.add_action(self.preferences_button)
    action_visualsmenu = Gtk.Action("Visuals", "Visuals", None, None)
    action_group.add_action_with_accel(action_visualsmenu, None)
    # Radio group: value 2 ("paper") is preselected.
    action_group.add_radio_actions([
        ("metro", None, "metro", None, None, 1),
        ("paper", None, "paper", None, None, 2)
    ], 2, self.on_menu_choices_changed)
    lights_on_widget = Gtk.ToggleAction("lights_on_option",
                                        "Turn lights off",
                                        None, None)
    lights_on_widget.connect("toggled", self.on_menu_choices_toggled)
    action_group.add_action(lights_on_widget)
def create_ui_manager(self):
    """@brief Builds the UIManager from the module-level UI_INFO XML and
    attaches its accelerator group to this window.

    @return the configured Gtk.UIManager.
    """
    uimanager = Gtk.UIManager()
    uimanager.add_ui_from_string(UI_INFO)
    # Add the accelerator group to the toplevel window
    accelgroup = uimanager.get_accel_group()
    self.add_accel_group(accelgroup)
    return uimanager
def on_menu_choices_changed(self, widget, current):
    """@brief Radio-menu handler: applies the newly selected CSS theme
    (the radio action's name, e.g. "metro" or "paper")."""
    self.gtk_change_visuals(light_option="unchanged",
                            theme=current.get_name())
def on_menu_choices_toggled(self, widget):
    """@brief Toggle-menu handler: switches between the normal ("gtk")
    and dark ("gtk-dark") CSS variant, keeping the theme unchanged."""
    light = "gtk-dark" if widget.get_active() else "gtk"
    self.gtk_change_visuals(light_option=light, theme="unchanged")
def final_responsabilities(self, widget=None):
    """@brief Shutdown hook: if a PostEditing session was created, saves
    its pending changes and deletes its temporary generated files."""
    if hasattr(self, 'PostEditing'):
        self.PostEditing.saveChangedFromPostEditing()
        self.PostEditing.delete_generated_files()
# --- Script entry point: build the main window, apply the CSS theme and
# the extra stylesheet, then enter the Gtk main loop. ---
win = MyWindow()
win.set_name('TTT')
win.gtk_change_visuals(light_option="gtk", theme="paper")
win.connect("delete-event", Gtk.main_quit)
# Additional application-wide stylesheet on top of the theme CSS.
style_provider = Gtk.CssProvider()
style_provider.load_from_path("css/style.css")
Gtk.StyleContext.add_provider_for_screen(
    Gdk.Screen.get_default(),
    style_provider,
    Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
)
win.show_all()
Gtk.main()
# TODOs
# 1- Check that files source and target have at least 100 lines.
# 2- Add buttons for choosing number of cores to use and other parameters.
| gpl-3.0 |
wemanuel/smry | smry/server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/compute/subcommands/groups/remove_members.py | 2 | 2345 | # Copyright 2015 Google Inc. All Rights Reserved.
"""Command for removing a user from a group."""
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.compute.lib import base_classes
from googlecloudsdk.compute.lib import user_utils
class RemoveMembers(base_classes.NoOutputAsyncMutator,
                    user_utils.UserResourceFetcher):
  """Remove a user from a Google Compute Engine group.

  *{command}* removes a user from a Google Compute Engine group.
  """

  @staticmethod
  def Args(parser):
    """Registers the NAME positional and the --members flag."""
    parser.add_argument(
        'names',
        metavar='NAME',
        type=arg_parsers.ArgList(min_length=1),
        action=arg_parsers.FloatingListValuesCatcher(),
        help='The names of the groups to remove members from.')

    parser.add_argument(
        '--members',
        metavar='USERNAME',
        required=True,
        type=arg_parsers.ArgList(min_length=1),
        action=arg_parsers.FloatingListValuesCatcher(),
        help='The names or fully-qualified URLs of the users to remove.')

  @property
  def service(self):
    # API service the requests are sent to.
    return self.computeaccounts.groups

  @property
  def method(self):
    # RPC method invoked on the service.
    return 'RemoveMember'

  @property
  def resource_type(self):
    return 'groups'

  @property
  def messages(self):
    return self.computeaccounts.MESSAGES_MODULE

  def CreateRequests(self, args):
    """Returns one RemoveMember request per (group, member) pair."""
    user_refs = self.CreateAccountsReferences(
        args.members, resource_type='users')
    group_refs = self.CreateAccountsReferences(
        args.names, resource_type='groups')
    requests = []
    for group_ref in group_refs:
      for user_ref in user_refs:
        remove_member = self.messages.GroupsRemoveMemberRequest(
            users=[user_ref.SelfLink()])
        request = self.messages.ComputeaccountsGroupsRemoveMemberRequest(
            project=self.project,
            groupsRemoveMemberRequest=remove_member,
            groupName=group_ref.Name())
        requests.append(request)
    return requests
RemoveMembers.detailed_help = {
'EXAMPLES': """\
To remove a user from a group, run:
$ {command} example-group --members example-user
To remove multiple users from multiple groups with
one command, run
$ {command} example-group-1 example-group-2 \\
--members example-user-1 example-user-2
""",
}
| apache-2.0 |
Davidjohnwilson/sympy | sympy/core/core.py | 87 | 2874 | """ The core's core. """
from __future__ import print_function, division
# used for canonical ordering of symbolic sequences
# via __cmp__ method:
# FIXME this is *so* irrelevant and outdated!
# Earlier entries sort first; names absent from this list fall back to
# plain alphabetical comparison in BasicMeta.__cmp__.
ordering_of_classes = [
    # singleton numbers
    'Zero', 'One', 'Half', 'Infinity', 'NaN', 'NegativeOne', 'NegativeInfinity',
    # numbers
    'Integer', 'Rational', 'Float',
    # singleton symbols
    'Exp1', 'Pi', 'ImaginaryUnit',
    # symbols
    'Symbol', 'Wild', 'Temporary',
    # arithmetic operations
    'Pow', 'Mul', 'Add',
    # function values
    'Derivative', 'Integral',
    # defined singleton functions
    'Abs', 'Sign', 'Sqrt',
    'Floor', 'Ceiling',
    'Re', 'Im', 'Arg',
    'Conjugate',
    'Exp', 'Log',
    'Sin', 'Cos', 'Tan', 'Cot', 'ASin', 'ACos', 'ATan', 'ACot',
    'Sinh', 'Cosh', 'Tanh', 'Coth', 'ASinh', 'ACosh', 'ATanh', 'ACoth',
    'RisingFactorial', 'FallingFactorial',
    'factorial', 'binomial',
    'Gamma', 'LowerGamma', 'UpperGamma', 'PolyGamma',
    'Erf',
    # special polynomials
    'Chebyshev', 'Chebyshev2',
    # undefined functions
    'Function', 'WildFunction',
    # anonymous functions
    'Lambda',
    # Landau O symbol
    'Order',
    # relational operations
    'Equality', 'Unequality', 'StrictGreaterThan', 'StrictLessThan',
    'GreaterThan', 'LessThan',
]
class Registry(object):
    """
    Base class for registry objects.

    A registry maps names to objects through attribute access. All
    instances of a registry class share one state, because reads and
    writes are redirected to the class object itself -- the class
    behaves as a singleton.

    Subclasses must declare `__slots__ = []` so instances carry no
    per-instance __dict__.
    """
    __slots__ = []

    def __setattr__(self, name, obj):
        # Store on the class so every instance observes the same mapping.
        setattr(type(self), name, obj)

    def __delattr__(self, name):
        # Remove from the class, mirroring __setattr__.
        delattr(type(self), name)
# A set containing all sympy class objects.
all_classes = set()


class BasicMeta(type):
    """Metaclass that registers classes and orders them canonically."""

    def __init__(cls, *args, **kws):
        # Record every class created with this metaclass.
        all_classes.add(cls)

    def __cmp__(cls, other):
        # Anything not governed by BasicMeta compares as "greater".
        if not isinstance(other, BasicMeta):
            return -1
        name_self, name_other = cls.__name__, other.__name__
        if name_self == name_other:
            return 0
        # Rank by position in ordering_of_classes; unlisted names share a
        # rank past the end of the list.
        unknown = len(ordering_of_classes) + 1
        try:
            rank_self = ordering_of_classes.index(name_self)
        except ValueError:
            rank_self = unknown
        try:
            rank_other = ordering_of_classes.index(name_other)
        except ValueError:
            rank_other = unknown
        if rank_self == unknown and rank_other == unknown:
            # Neither is listed: fall back to alphabetical comparison.
            return (name_self > name_other) - (name_self < name_other)
        return (rank_self > rank_other) - (rank_self < rank_other)

    def __lt__(cls, other):
        return cls.__cmp__(other) == -1

    def __gt__(cls, other):
        return cls.__cmp__(other) == 1
| bsd-3-clause |
legalsylvain/OpenUpgrade | addons/sale/edi/sale_order.py | 403 | 10861 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011-2012 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.addons.edi import EDIMixin
from openerp.tools.translate import _
from werkzeug import url_encode
# Subset of sale.order.line fields exchanged over EDI.  A True value means
# "export the field as-is"; fields with special handling are noted inline.
SALE_ORDER_LINE_EDI_STRUCT = {
    'sequence': True,
    'name': True,
    #custom: 'date_planned'
    'product_id': True,
    'product_uom': True,
    'price_unit': True,
    #custom: 'product_qty'
    'discount': True,

    # fields used for web preview only - discarded on import
    'price_subtotal': True,
}
# Subset of sale.order fields exchanged over EDI; see the line struct above
# for the meaning of the values.
SALE_ORDER_EDI_STRUCT = {
    'name': True,
    'origin': True,
    'company_id': True, # -> to be changed into partner
    #custom: 'partner_ref'
    'date_order': True,
    'partner_id': True,
    #custom: 'partner_address'
    #custom: 'notes'
    'order_line': SALE_ORDER_LINE_EDI_STRUCT,

    # fields used for web preview only - discarded on import
    'amount_total': True,
    'amount_untaxed': True,
    'amount_tax': True,
    'payment_term': True,
    'order_policy': True,
    'user_id': True,
    'state': True,
}
class sale_order(osv.osv, EDIMixin):
    """EDI support for sale orders.

    Exports a sale.order as an EDI document that the recipient imports as a
    purchase.order (and symmetrically for imports), translating company and
    partner references along the way.
    """
    _inherit = 'sale.order'

    def edi_export(self, cr, uid, records, edi_struct=None, context=None):
        """Exports a Sale order"""
        edi_struct = dict(edi_struct or SALE_ORDER_EDI_STRUCT)
        res_company = self.pool.get('res.company')
        res_partner_obj = self.pool.get('res.partner')
        edi_doc_list = []
        for order in records:
            # generate the main report
            self._edi_generate_report_attachment(cr, uid, order, context=context)

            # Get EDI doc based on struct. The result will also contain all metadata fields and attachments.
            edi_doc = super(sale_order,self).edi_export(cr, uid, [order], edi_struct, context)[0]
            edi_doc.update({
                # force trans-typing to purchase.order upon import
                '__import_model': 'purchase.order',
                '__import_module': 'purchase',
                'company_address': res_company.edi_export_address(cr, uid, order.company_id, context=context),
                'partner_address': res_partner_obj.edi_export(cr, uid, [order.partner_id], context=context)[0],
                'currency': self.pool.get('res.currency').edi_export(cr, uid, [order.pricelist_id.currency_id],
                                                                     context=context)[0],
                'partner_ref': order.client_order_ref or False,
                'notes': order.note or False,
            })
            edi_doc_list.append(edi_doc)
        return edi_doc_list

    def _edi_import_company(self, cr, uid, edi_document, context=None):
        """Import the sending company of `edi_document` as a local customer
        partner, rewrite the document's partner fields to point at it, and
        return the new partner id."""
        # TODO: for multi-company setups, we currently import the document in the
        #       user's current company, but we should perhaps foresee a way to select
        #       the desired company among the user's allowed companies
        self._edi_requires_attributes(('company_id','company_address'), edi_document)
        res_partner = self.pool.get('res.partner')

        xid, company_name = edi_document.pop('company_id')
        # Retrofit address info into a unified partner info (changed in v7 - used to keep them separate)
        company_address_edi = edi_document.pop('company_address')
        company_address_edi['name'] = company_name
        company_address_edi['is_company'] = True
        company_address_edi['__import_model'] = 'res.partner'
        company_address_edi['__id'] = xid  # override address ID, as of v7 they should be the same anyway
        if company_address_edi.get('logo'):
            company_address_edi['image'] = company_address_edi.pop('logo')
        # The sender of a sale order is our customer.
        company_address_edi['customer'] = True
        partner_id = res_partner.edi_import(cr, uid, company_address_edi, context=context)

        # modify edi_document to refer to new partner
        partner = res_partner.browse(cr, uid, partner_id, context=context)
        partner_edi_m2o = self.edi_m2o(cr, uid, partner, context=context)
        edi_document['partner_id'] = partner_edi_m2o
        edi_document['partner_invoice_id'] = partner_edi_m2o
        edi_document['partner_shipping_id'] = partner_edi_m2o
        edi_document.pop('partner_address', None) # ignored, that's supposed to be our own address!
        return partner_id

    def _edi_get_pricelist(self, cr, uid, partner_id, currency, context=None):
        """Return an edi m2o reference to a sale pricelist in `currency` for
        `partner_id`, creating a new 'EDI Pricelist' if none matches."""
        # TODO: refactor into common place for purchase/sale, e.g. into product module
        partner_model = self.pool.get('res.partner')
        partner = partner_model.browse(cr, uid, partner_id, context=context)
        pricelist = partner.property_product_pricelist
        if not pricelist:
            # Fall back on the demo/default public pricelist.
            pricelist = self.pool.get('ir.model.data').get_object(cr, uid, 'product', 'list0', context=context)
        if not pricelist.currency_id == currency:
            # look for a pricelist with the right type and currency, or make a new one
            pricelist_type = 'sale'
            product_pricelist = self.pool.get('product.pricelist')
            match_pricelist_ids = product_pricelist.search(cr, uid,[('type','=',pricelist_type),
                                                                   ('currency_id','=',currency.id)])
            if match_pricelist_ids:
                pricelist_id = match_pricelist_ids[0]
            else:
                pricelist_name = _('EDI Pricelist (%s)') % (currency.name,)
                pricelist_id = product_pricelist.create(cr, uid, {'name': pricelist_name,
                                                                  'type': pricelist_type,
                                                                  'currency_id': currency.id,
                                                                 })
                self.pool.get('product.pricelist.version').create(cr, uid, {'name': pricelist_name,
                                                                            'pricelist_id': pricelist_id})
            pricelist = product_pricelist.browse(cr, uid, pricelist_id)
        return self.edi_m2o(cr, uid, pricelist, context=context)

    def edi_import(self, cr, uid, edi_document, context=None):
        """Import an EDI sale order document: creates the sender as partner,
        maps currency/pricelist and renames purchase-side fields before
        delegating the record creation to EDIMixin."""
        self._edi_requires_attributes(('company_id','company_address','order_line','date_order','currency'), edi_document)

        #import company as a new partner
        partner_id = self._edi_import_company(cr, uid, edi_document, context=context)

        # currency for rounding the discount calculations and for the pricelist
        res_currency = self.pool.get('res.currency')
        currency_info = edi_document.pop('currency')
        currency_id = res_currency.edi_import(cr, uid, currency_info, context=context)
        order_currency = res_currency.browse(cr, uid, currency_id)

        # The sender's order name becomes our client reference; our own name
        # prefers their partner_ref when provided.
        partner_ref = edi_document.pop('partner_ref', False)
        edi_document['client_order_ref'] = edi_document['name']
        edi_document['name'] = partner_ref or edi_document['name']
        edi_document['note'] = edi_document.pop('notes', False)
        edi_document['pricelist_id'] = self._edi_get_pricelist(cr, uid, partner_id, order_currency, context=context)

        # discard web preview fields, if present
        edi_document.pop('amount_total', None)
        edi_document.pop('amount_tax', None)
        edi_document.pop('amount_untaxed', None)

        order_lines = edi_document['order_line']
        for order_line in order_lines:
            self._edi_requires_attributes(('product_id', 'product_uom', 'product_qty', 'price_unit'), order_line)
            # Purchase orders say 'product_qty'; sale order lines say
            # 'product_uom_qty'.
            order_line['product_uom_qty'] = order_line['product_qty']
            del order_line['product_qty']

            # discard web preview fields, if present
            order_line.pop('price_subtotal', None)
        return super(sale_order,self).edi_import(cr, uid, edi_document, context=context)

    def _edi_paypal_url(self, cr, uid, ids, field, arg, context=None):
        """Function field: a PayPal payment URL for prepaid/manual non-draft
        orders of companies that configured a paypal_account, else False."""
        res = dict.fromkeys(ids, False)
        for order in self.browse(cr, uid, ids, context=context):
            if order.order_policy in ('prepaid', 'manual') and \
                    order.company_id.paypal_account and order.state != 'draft':
                params = {
                    "cmd": "_xclick",
                    "business": order.company_id.paypal_account,
                    "item_name": order.company_id.name + " Order " + order.name,
                    "invoice": order.name,
                    "amount": order.amount_total,
                    "currency_code": order.pricelist_id.currency_id.name,
                    "button_subtype": "services",
                    "no_note": "1",
                    "bn": "OpenERP_Order_PayNow_" + order.pricelist_id.currency_id.name,
                }
                res[order.id] = "https://www.paypal.com/cgi-bin/webscr?" + url_encode(params)
        return res

    _columns = {
        'paypal_url': fields.function(_edi_paypal_url, type='char', string='Paypal Url'),
    }
class sale_order_line(osv.osv, EDIMixin):
    """EDI export for sale order lines, trans-typed to purchase order
    lines on the receiving side."""
    _inherit = 'sale.order.line'

    def edi_export(self, cr, uid, records, edi_struct=None, context=None):
        """Overridden to provide sale order line fields with the expected names
        (sale and purchase orders have different column names)"""
        struct = dict(edi_struct or SALE_ORDER_LINE_EDI_STRUCT)
        exported_docs = []
        for line in records:
            doc = super(sale_order_line, self).edi_export(
                cr, uid, [line], struct, context)[0]
            # Recipient should import this as a purchase order line, whose
            # quantity column is named 'product_qty'.
            doc['__import_model'] = 'purchase.order.line'
            doc['product_qty'] = line.product_uom_qty
            if line.product_uos:
                # Prefer the unit-of-sale quantity/uom when one is set.
                doc.update(product_uom=line.product_uos,
                           product_qty=line.product_uos_qty)
            exported_docs.append(doc)
        return exported_docs
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
befair/gasistafelice | gasistafelice/rest/views/blocks/supplier_details.py | 3 | 2577 | """View for block details specialized for a GAS"""
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext as _, ugettext_lazy as _lazy
from django.core.urlresolvers import reverse
from django.conf import settings
from flexi_auth.models import ObjectWithContext
from ajax_select.fields import autoselect_fields_check_can_add
from consts import EDIT
from .base import ResourceBlockAction
import details
from gf.supplier.models import Supplier
from lib.shortcuts import render_to_context_response
from gf.supplier.forms import EditSupplierForm, SupplierRoleForm
import logging
log = logging.getLogger(__name__)
class Block(details.Block):
    """Resource-details block specialized for suppliers."""

    BLOCK_NAME = "supplier_details"
    BLOCK_VALID_RESOURCE_TYPES = ["supplier"]

    FORMCLASS_MANAGE_ROLES = SupplierRoleForm

    def _get_user_actions(self, request):
        """Who can edit Supplier informations, has also the ability to
        configure it."""
        actions = super(Block, self)._get_user_actions(request)
        resource = request.resource

        if request.user.has_perm(EDIT, obj=ObjectWithContext(resource)):
            configure_action = ResourceBlockAction(
                block_name=self.BLOCK_NAME,
                resource=resource,
                name="configure", verbose_name=_("Configure"),
                popup_form=True,
                url=reverse('admin:supplier_supplierconfig_change',
                            args=(resource.config.pk,))
            )
            # Insert the "configure" action right after the EDIT action.
            for position, action in enumerate(actions):
                if action.name == EDIT:
                    actions.insert(position + 1, configure_action)
                    break

        actions.append(ResourceBlockAction(
            block_name=self.BLOCK_NAME,
            resource=resource,
            name="export", verbose_name="GDXP",
            popup_form=False,
            url="%s?%s" % (
                reverse('gdxp.views.suppliers'),
                "pk=%s&opt_catalog=1" % resource.pk
            ),
            method="OPENURL"
        ))

        return actions

    def _get_edit_form_class(self):
        """Return the supplier edit form, with add-permission checks applied
        to its autoselect fields."""
        edit_form_class = EditSupplierForm
        autoselect_fields_check_can_add(edit_form_class, Supplier,
                                        self.request.user)
        return edit_form_class
| agpl-3.0 |
MythicApps/MythicAppsSite | MythicApps/settings.py | 1 | 3434 | """
Django settings for MythicApps project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import MythicApps.djangoCreds as djangoCreds
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; move it into an
# environment variable or djangoCreds before any production deploy — confirm.
SECRET_KEY = '(ra*6#gi6=!96$ij#tq#k_%6th46du6ksy704v7rri^z9l(6yt'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'General',
    'Applications',
    'rest_framework',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

ROOT_URLCONF = 'MythicApps.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'MythicApps.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# Credentials live in the (untracked) djangoCreds module.

DATABASES = {
    'default': djangoCreds.dbSettings
}


# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/

STATIC_URL = '/static/'

# NOTE(review): TEMPLATE_DIRS is the pre-1.8 setting and is unused when the
# TEMPLATES dict above is present — verify whether it can be dropped.
TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, 'templates'),
)

STATIC_DIRS = (
    os.path.join(BASE_DIR, 'dirs'),
)

# File logging by default; see the DEBUG override at the bottom of this file.
LOGGING= {
    'version': 1,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
        'file': {
            'level': 'DEBUG',
            'class': 'logging.FileHandler',
            'filename': 'file.log',
            'formatter': 'simple'
        },
    },
    'loggers': {
        'django': {
            'handlers': ['file'],
            'level': 'DEBUG',
            'propagate': True,
        },
    }
}

if DEBUG:
    # make all loggers use the console.
    for logger in LOGGING['loggers']:
        LOGGING['loggers'][logger]['handlers'] = ['console']
balister/gnuradio | gr-digital/python/digital/qa_constellation_soft_decoder_cf.py | 40 | 7930 | #!/usr/bin/env python
#
# Copyright 2013-2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, digital, blocks
from math import sqrt
from numpy import random, vectorize
class test_constellation_soft_decoder(gr_unittest.TestCase):
    """QA for digital.constellation_soft_decoder_cf: compares the block's
    soft decisions against the Python reference implementations, both with
    a precomputed look-up table (LUT) and with direct computation."""

    # Deterministic set of complex samples shared by all fixed-input tests.
    # (Previously this tuple was duplicated verbatim in six test cases.)
    SRC_DATA = (-1.0 - 1.0j, 1.0 - 1.0j, -1.0 + 1.0j, 1.0 + 1.0j,
                -2.0 - 2.0j, 2.0 - 2.0j, -2.0 + 2.0j, 2.0 + 2.0j,
                -0.2 - 0.2j, 0.2 - 0.2j, -0.2 + 0.2j, 0.2 + 0.2j,
                0.3 + 0.4j, 0.1 - 1.2j, -0.8 - 0.1j, -0.4 + 0.8j,
                0.8 + 1.0j, -0.5 + 0.1j, 0.1 + 1.2j, -1.7 - 0.9j)

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    @staticmethod
    def _random_src(count=100):
        # `count` random complex samples, sigma=2 per component.
        return vectorize(complex)(2*random.randn(count), 2*random.randn(count))

    def helper_with_lut(self, prec, src_data, const_gen, const_sd_gen):
        """Run the block with a `prec`-bit LUT and compare against the
        Python reference LUT lookup."""
        cnst_pts, code = const_gen()
        Es = max([abs(c) for c in cnst_pts])
        lut = digital.soft_dec_table_generator(const_sd_gen, prec, Es)
        expected_result = list()
        for s in src_data:
            res = digital.calc_soft_dec_from_table(s, lut, prec, sqrt(2.0))
            expected_result += res

        cnst = digital.constellation_calcdist(cnst_pts, code, 2, 1)
        cnst.set_soft_dec_lut(lut, int(prec))
        src = blocks.vector_source_c(src_data)
        op = digital.constellation_soft_decoder_cf(cnst.base())
        dst = blocks.vector_sink_f()
        self.tb.connect(src, op)
        self.tb.connect(op, dst)
        self.tb.run()

        actual_result = dst.data()  # fetch the contents of the sink
        self.assertFloatTuplesAlmostEqual(expected_result, actual_result, 5)

    def helper_no_lut(self, prec, src_data, const_gen, const_sd_gen):
        """Run the block without a LUT and compare against the direct
        Python soft-decision computation."""
        cnst_pts, code = const_gen()
        cnst = digital.constellation_calcdist(cnst_pts, code, 2, 1)
        expected_result = list()
        for s in src_data:
            res = digital.calc_soft_dec(s, cnst.points(), code)
            expected_result += res

        src = blocks.vector_source_c(src_data)
        op = digital.constellation_soft_decoder_cf(cnst.base())
        dst = blocks.vector_sink_f()
        self.tb.connect(src, op)
        self.tb.connect(op, dst)
        self.tb.run()

        actual_result = dst.data()  # fetch the contents of the sink
        # Double vs. float precision issues between Python and C++, so
        # use only 4 decimals in comparisons.
        self.assertFloatTuplesAlmostEqual(expected_result, actual_result, 4)

    def test_constellation_soft_decoder_cf_bpsk_3(self):
        self.helper_with_lut(3, self.SRC_DATA,
                             digital.psk_2_0x0, digital.sd_psk_2_0x0)

    def test_constellation_soft_decoder_cf_bpsk_8(self):
        self.helper_with_lut(8, self.SRC_DATA,
                             digital.psk_2_0x0, digital.sd_psk_2_0x0)

    def test_constellation_soft_decoder_cf_bpsk_8_rand(self):
        self.helper_with_lut(8, self._random_src(),
                             digital.psk_2_0x0, digital.sd_psk_2_0x0)

    def test_constellation_soft_decoder_cf_bpsk_8_rand2(self):
        self.helper_no_lut(8, self._random_src(),
                           digital.psk_2_0x0, digital.sd_psk_2_0x0)

    def test_constellation_soft_decoder_cf_qpsk_3(self):
        self.helper_with_lut(3, self.SRC_DATA,
                             digital.psk_4_0x0_0_1, digital.sd_psk_4_0x0_0_1)

    def test_constellation_soft_decoder_cf_qpsk_8(self):
        self.helper_with_lut(8, self.SRC_DATA,
                             digital.psk_4_0x0_0_1, digital.sd_psk_4_0x0_0_1)

    def test_constellation_soft_decoder_cf_qpsk_8_rand(self):
        self.helper_with_lut(8, self._random_src(),
                             digital.psk_4_0x0_0_1, digital.sd_psk_4_0x0_0_1)

    def test_constellation_soft_decoder_cf_qpsk_8_rand2(self):
        self.helper_no_lut(8, self._random_src(),
                           digital.psk_4_0x0_0_1, digital.sd_psk_4_0x0_0_1)

    def test_constellation_soft_decoder_cf_qam16_3(self):
        self.helper_with_lut(3, self.SRC_DATA,
                             digital.qam_16_0x0_0_1_2_3,
                             digital.sd_qam_16_0x0_0_1_2_3)

    def test_constellation_soft_decoder_cf_qam16_8(self):
        self.helper_with_lut(8, self.SRC_DATA,
                             digital.qam_16_0x0_0_1_2_3,
                             digital.sd_qam_16_0x0_0_1_2_3)

    def test_constellation_soft_decoder_cf_qam16_8_rand(self):
        self.helper_with_lut(8, self._random_src(),
                             digital.qam_16_0x0_0_1_2_3,
                             digital.sd_qam_16_0x0_0_1_2_3)

    def test_constellation_soft_decoder_cf_qam16_8_rand2(self):
        # Deliberately tiny input (2 samples), as in the original test.
        self.helper_no_lut(8, self._random_src(2),
                           digital.qam_16_0x0_0_1_2_3,
                           digital.sd_qam_16_0x0_0_1_2_3)
if __name__ == '__main__':
    # Kept for reference: XML-report variant of the runner.
    #gr_unittest.run(test_constellation_soft_decoder, "test_constellation_soft_decoder.xml")
    gr_unittest.run(test_constellation_soft_decoder)
| gpl-3.0 |
endlessm/chromium-browser | v8/tools/v8_presubmit.py | 3 | 23806 | #!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# for py2/py3 compatibility
from __future__ import print_function
try:
import hashlib
md5er = hashlib.md5
except ImportError as e:
import md5
md5er = md5.new
import json
import optparse
import os
from os.path import abspath, join, dirname, basename, exists
import pickle
import re
import sys
import subprocess
import multiprocessing
from subprocess import PIPE
from testrunner.local import statusfile
from testrunner.local import testsuite
from testrunner.local import utils
# Special LINT rules diverging from default and reason.
# build/header_guard: Our guards have the form "V8_FOO_H_", not "SRC_FOO_H_".
#   We now run our own header guard check in PRESUBMIT.py.
# build/include_what_you_use: Started giving false positives for variables
#   named "string" and "map" assuming that you needed to include STL headers.

LINT_RULES = """
-build/header_guard
-build/include_what_you_use
-readability/fn_size
-readability/multiline_comment
-whitespace/comments
""".split()

# Raw strings below: the originals were plain strings containing '\s', '\d'
# etc., which are invalid escape sequences and warn on modern Python.
LINT_OUTPUT_PATTERN = re.compile(r'^.+[:(]\d+[:)]|^Done processing')
FLAGS_LINE = re.compile(r"//\s*Flags:.*--([A-z0-9-])+_[A-z0-9].*\n")
ASSERT_OPTIMIZED_PATTERN = re.compile(r"assertOptimized")
FLAGS_ENABLE_OPT = re.compile(r"//\s*Flags:.*--opt[^-].*\n")
ASSERT_UNOPTIMIZED_PATTERN = re.compile(r"assertUnoptimized")
FLAGS_NO_ALWAYS_OPT = re.compile(r"//\s*Flags:.*--no-?always-opt.*\n")

TOOLS_PATH = dirname(abspath(__file__))
def CppLintWorker(command):
  """Run one cpplint invocation given by `command`.

  Returns the number of lint errors found, or 1 when the file could not be
  processed at all.  Prints matched lint output to stdout.
  """
  process = None  # guard: Popen itself may raise before `process` is bound
  try:
    # universal_newlines makes stderr a text stream on both Python 2 and 3;
    # the readline() loop below compares against the str sentinel ''.
    process = subprocess.Popen(command, stderr=subprocess.PIPE,
                               universal_newlines=True)
    process.wait()
    out_lines = ""
    error_count = -1
    while True:
      out_line = process.stderr.readline()
      if out_line == '' and process.poll() != None:
        if error_count == -1:
          print("Failed to process %s" % command.pop())
          return 1
        break
      m = LINT_OUTPUT_PATTERN.match(out_line)
      if m:
        out_lines += out_line
        error_count += 1
    sys.stdout.write(out_lines)
    return error_count
  except KeyboardInterrupt:
    if process:
      process.kill()
  except Exception:
    # Narrowed from a bare `except:`; also avoid a NameError when Popen
    # failed and `process` was never created.
    print('Error running cpplint.py. Please make sure you have depot_tools' +
          ' in your $PATH. Lint check skipped.')
    if process:
      process.kill()
def TorqueLintWorker(command):
  """Run one format-torque invocation given by `command`.

  Returns the number of stderr lines the formatter produced (0 means the
  file was already formatted).  Echoes those lines to stdout.
  """
  process = None  # guard: Popen itself may raise before `process` is bound
  try:
    # universal_newlines makes stderr a text stream on both Python 2 and 3;
    # the readline() loop below compares against the str sentinel ''.
    process = subprocess.Popen(command, stderr=subprocess.PIPE,
                               universal_newlines=True)
    process.wait()
    out_lines = ""
    error_count = 0
    while True:
      out_line = process.stderr.readline()
      if out_line == '' and process.poll() != None:
        break
      out_lines += out_line
      error_count += 1
    sys.stdout.write(out_lines)
    if error_count != 0:
      sys.stdout.write(
          "warning: formatting and overwriting unformatted Torque files\n")
    return error_count
  except KeyboardInterrupt:
    if process:
      process.kill()
  except Exception:
    # Narrowed from a bare `except:`; also avoid a NameError when Popen
    # failed and `process` was never created.
    print('Error running format-torque.py')
    if process:
      process.kill()
class FileContentsCache(object):
  """Persistent md5-digest cache used to skip unchanged files.

  The cache is a pickled dict {file path: md5 digest} stored at
  `sums_file_name`.  Files are opened in binary mode: pickle streams are
  bytes (the original text-mode 'r'/'w' breaks on Python 3) and hashing
  must see raw bytes.
  """

  def __init__(self, sums_file_name):
    self.sums = {}
    self.sums_file_name = sums_file_name

  def Load(self):
    """Load previous sums; silently start empty on any read/parse error."""
    try:
      with open(self.sums_file_name, 'rb') as sums_file:
        self.sums = pickle.load(sums_file)
    except Exception:
      # Missing or unparsable cache. Not much we can do about it.
      pass

  def Save(self):
    """Persist the sums; on failure try to remove the partial file."""
    try:
      with open(self.sums_file_name, 'wb') as sums_file:
        pickle.dump(self.sums, sums_file)
    except Exception:
      # Failed to write pickle. Try to clean-up behind us.
      try:
        os.unlink(self.sums_file_name)
      except OSError:
        pass

  def FilterUnchangedFiles(self, files):
    """Return the subset of `files` whose contents changed (or are new),
    updating the cached digests as a side effect."""
    changed_or_new = []
    for file in files:
      with open(file, "rb") as handle:
        file_sum = md5er(handle.read()).digest()
      if not file in self.sums or self.sums[file] != file_sum:
        changed_or_new.append(file)
        self.sums[file] = file_sum
    return changed_or_new

  def RemoveFile(self, file):
    """Forget the cached digest for `file` (no-op if absent)."""
    if file in self.sums:
      self.sums.pop(file)
class SourceFileProcessor(object):
  """
  Utility class that walks a directory structure, collects all relevant
  files and invokes a custom check on them.
  """

  def RunOnPath(self, path):
    """Runs processor on all files under the given path."""
    collected = []
    for subdir in self.GetPathsToSearch():
      collected.extend(self.FindFilesIn(join(path, subdir)))
    return self.ProcessFiles(collected)

  def RunOnFiles(self, files):
    """Runs processor only on affected files."""
    # Path offsets where to look (kept in sync with RunOnPath); '.' is
    # normalized to '' so the startswith test matches repo-root paths.
    search_paths = [('' if p == '.' else p) for p in self.GetPathsToSearch()]
    selected = []
    for f in files:
      local_path = f.LocalPath()
      if self.IgnoreFile(local_path) or not self.IsRelevant(local_path):
        continue
      if any(self.IgnoreDir(d) for d in dirname(local_path).split(os.sep)):
        continue
      if not any(local_path.startswith(p) for p in search_paths):
        continue
      selected.append(f.AbsoluteLocalPath())
    return self.ProcessFiles(selected)

  def IgnoreDir(self, name):
    # Hidden directories plus known benchmark/vendor trees.
    return (name.startswith('.') or
            name in ('buildtools', 'data', 'gmock', 'gtest', 'kraken',
                     'octane', 'sunspider', 'traces-arm64'))

  def IgnoreFile(self, name):
    return name.startswith('.')

  def FindFilesIn(self, path):
    matches = []
    for (root, dirs, files) in os.walk(path):
      # Prune ignored directories in place so os.walk skips them.
      dirs[:] = [d for d in dirs if not self.IgnoreDir(d)]
      for f in files:
        if not self.IgnoreFile(f) and self.IsRelevant(f):
          matches.append(join(root, f))
    return matches
class CacheableSourceFileProcessor(SourceFileProcessor):
  """Utility class that allows caching ProcessFiles() method calls.

  In order to use it, create a ProcessFilesWithoutCaching method that returns
  the files requiring intervention after processing the source files.
  """

  def __init__(self, use_cache, cache_file_path, file_type):
    self.use_cache = use_cache
    self.cache_file_path = cache_file_path
    self.file_type = file_type

  def GetProcessorWorker(self):
    """Expected to return the worker function to run the formatter."""
    raise NotImplementedError

  def GetProcessorScript(self):
    """Expected to return a tuple
    (path to the format processor script, list of arguments)."""
    raise NotImplementedError

  def GetProcessorCommand(self):
    """Build the [interpreter, script, args...] command line, exiting if the
    processor script cannot be located."""
    format_processor, options = self.GetProcessorScript()
    if not format_processor:
      # Bug fix: the original format string used '% files' — '% f' is a
      # float conversion and raised a TypeError with a string argument.
      print('Could not find the formatter for %s files' % self.file_type)
      sys.exit(1)

    command = [sys.executable, format_processor]
    command.extend(options)

    return command

  def ProcessFiles(self, files):
    """Process `files` (skipping unchanged ones when caching is enabled);
    returns True when no file requires formatting."""
    if self.use_cache:
      cache = FileContentsCache(self.cache_file_path)
      cache.Load()
      files = cache.FilterUnchangedFiles(files)

    if len(files) == 0:
      print('No changes in %s files detected. Skipping check' % self.file_type)
      return True

    files_requiring_changes = self.DetectFilesToChange(files)
    print(
        'Total %s files found that require formatting: %d' %
        (self.file_type, len(files_requiring_changes)))
    if self.use_cache:
      # Keep files that still need fixing out of the cache so they are
      # rechecked on the next run.
      for file in files_requiring_changes:
        cache.RemoveFile(file)

      cache.Save()

    return files_requiring_changes == []

  def DetectFilesToChange(self, files):
    """Run the processor worker over `files` in parallel; returns the list
    of files the worker reported errors for."""
    command = self.GetProcessorCommand()
    worker = self.GetProcessorWorker()

    commands = [command + [file] for file in files]
    count = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(count)
    try:
      results = pool.map_async(worker, commands).get(timeout=240)
    except KeyboardInterrupt:
      print("\nCaught KeyboardInterrupt, terminating workers.")
      pool.terminate()
      pool.join()
      sys.exit(1)

    unformatted_files = []
    for index, errors in enumerate(results):
      if errors > 0:
        unformatted_files.append(files[index])

    return unformatted_files
class CppLintProcessor(CacheableSourceFileProcessor):
  """
  Lint files to check that they follow the google code style.
  """

  def __init__(self, use_cache=True):
    super(CppLintProcessor, self).__init__(
        use_cache=use_cache, cache_file_path='.cpplint-cache',
        file_type='C/C++')

  def IsRelevant(self, name):
    return name.endswith('.cc') or name.endswith('.h')

  def IgnoreDir(self, name):
    return (super(CppLintProcessor, self).IgnoreDir(name)
            or (name == 'third_party'))

  # Files with deliberate style deviations that cpplint would flag.
  IGNORE_LINT = [
    'export-template.h',
    'flag-definitions.h',
    'gay-fixed.cc',
    'gay-precision.cc',
    'gay-shortest.cc',
  ]

  def IgnoreFile(self, name):
    return (super(CppLintProcessor, self).IgnoreFile(name)
            or (name in CppLintProcessor.IGNORE_LINT))

  def GetPathsToSearch(self):
    dirs = ['include', 'samples', 'src']
    test_dirs = ['cctest', 'common', 'fuzzer', 'inspector', 'unittests']
    # 'd' instead of 'dir': avoid shadowing the builtin.
    return dirs + [join('test', d) for d in test_dirs]

  def GetProcessorWorker(self):
    return CppLintWorker

  def GetProcessorScript(self):
    # join() accepts any iterable; the original wrapped LINT_RULES in a
    # redundant list comprehension.
    filters = ','.join(LINT_RULES)
    arguments = ['--filter', filters]
    # Search our tools dir first, then $PATH, for cpplint.py.
    for path in [TOOLS_PATH] + os.environ["PATH"].split(os.pathsep):
      path = path.strip('"')
      cpplint = os.path.join(path, 'cpplint.py')
      if os.path.isfile(cpplint):
        return cpplint, arguments

    return None, arguments
class TorqueLintProcessor(CacheableSourceFileProcessor):
  """
  Check .tq files to verify they follow the Torque style guide.
  """

  def __init__(self, use_cache=True):
    super(TorqueLintProcessor, self).__init__(
        use_cache=use_cache,
        cache_file_path='.torquelint-cache',
        file_type='Torque')

  def IsRelevant(self, name):
    return name.endswith('.tq')

  def GetPathsToSearch(self):
    paths = ['third_party', 'src']
    paths.extend(join('test', d) for d in ['torque'])
    return paths

  def GetProcessorWorker(self):
    return TorqueLintWorker

  def GetProcessorScript(self):
    # format-torque.py rewrites files in place when given -il.
    arguments = ["-il"]
    script = os.path.join(TOOLS_PATH, "torque", "format-torque.py")
    if os.path.isfile(script):
      return script, arguments

    return None, arguments
# Matches the first line of the V8 copyright header; [\d-]* allows an
# optional "YYYY-" range prefix before the final 20xx year.
COPYRIGHT_HEADER_PATTERN = re.compile(
    r'Copyright [\d-]*20[0-2][0-9] the V8 project authors. All rights reserved.')
class SourceProcessor(SourceFileProcessor):
  """
  Check that all files include a copyright notice and no trailing whitespaces.
  """

  # File extensions subject to the source checks in this class.
  RELEVANT_EXTENSIONS = ['.js', '.cc', '.h', '.py', '.c', '.status', '.tq', '.g4']

  def __init__(self):
    # Precompile the natives-syntax (%RuntimeFunction) matcher once.
    self.runtime_function_call_pattern = self.CreateRuntimeFunctionCallMatcher()
def CreateRuntimeFunctionCallMatcher(self):
    """Build a regex matching natives-syntax calls (%FunctionName) for every
    runtime function declared in src/runtime/runtime.h.

    Exits with an error if suspiciously few declarations are found, which
    indicates the F(...) parsing below went stale.
    """
    runtime_h_path = join(dirname(TOOLS_PATH), 'src/runtime/runtime.h')
    pattern = re.compile(r'\s+F\(([^,]*),.*\)')
    runtime_functions = []
    with open(runtime_h_path) as f:
      for line in f.readlines():
        m = pattern.match(line)
        if m:
          runtime_functions.append(m.group(1))
    if len(runtime_functions) < 250:
      print ("Runtime functions list is suspiciously short. "
             "Consider updating the presubmit script.")
      sys.exit(1)
    # Raw strings: the original built this with '\%' and '\s' inside a
    # normal string literal (invalid escapes on modern Python).  Also use a
    # local name that does not shadow the builtin `str`.
    regexp = r'(\%\s+(' + '|'.join(runtime_functions) + r'))[\s\(]'
    return re.compile(regexp)
# Overwriting the one in the parent class.
def FindFilesIn(self, path):
    """Return relevant checked-in files under `path`.

    Inside a git checkout this asks 'git ls-files' for tracked files (and
    so skips untracked/generated ones); otherwise, or when git fails, it
    falls back to the parent's os.walk()-based scan.
    """
    if os.path.exists(path+'/.git'):
      output = subprocess.Popen('git ls-files --full-name',
                                stdout=PIPE, cwd=path, shell=True)
      result = []
      # NOTE(review): on Python 3 `output.stdout.read()` yields bytes, so
      # `file` is bytes here — verify the str-based checks below before
      # relying on this path under py3.
      for file in output.stdout.read().split():
        for dir_part in os.path.dirname(file).replace(os.sep, '/').split('/'):
          if self.IgnoreDir(dir_part):
            break
        else:
          if (self.IsRelevant(file) and os.path.exists(file)
              and not self.IgnoreFile(file)):
            result.append(join(path, file))
      # Only trust the git listing when the command exited cleanly.
      if output.wait() == 0:
        return result
    return super(SourceProcessor, self).FindFilesIn(path)
def IsRelevant(self, name):
for ext in SourceProcessor.RELEVANT_EXTENSIONS:
if name.endswith(ext):
return True
return False
def GetPathsToSearch(self):
return ['.']
def IgnoreDir(self, name):
return (super(SourceProcessor, self).IgnoreDir(name) or
name in ('third_party', 'out', 'obj', 'DerivedSources'))
IGNORE_COPYRIGHTS = ['box2d.js',
'cpplint.py',
'copy.js',
'corrections.js',
'crypto.js',
'daemon.py',
'earley-boyer.js',
'fannkuch.js',
'fasta.js',
'injected-script.cc',
'injected-script.h',
'libraries.cc',
'libraries-empty.cc',
'lua_binarytrees.js',
'meta-123.js',
'memops.js',
'poppler.js',
'primes.js',
'raytrace.js',
'regexp-pcre.js',
'resources-123.js',
'sqlite.js',
'sqlite-change-heap.js',
'sqlite-pointer-masking.js',
'sqlite-safe-heap.js',
'v8-debugger-script.h',
'v8-inspector-impl.cc',
'v8-inspector-impl.h',
'v8-runtime-agent-impl.cc',
'v8-runtime-agent-impl.h',
'gnuplot-4.6.3-emscripten.js',
'zlib.js']
IGNORE_TABS = IGNORE_COPYRIGHTS + ['unicode-test.js', 'html-comments.js']
IGNORE_COPYRIGHTS_DIRECTORIES = [
"test/test262/local-tests",
"test/mjsunit/wasm/bulk-memory-spec",
]
def EndOfDeclaration(self, line):
return line == "}" or line == "};"
def StartOfDeclaration(self, line):
return line.find("//") == 0 or \
line.find("/*") == 0 or \
line.find(") {") != -1
def ProcessContents(self, name, contents):
result = True
base = basename(name)
if not base in SourceProcessor.IGNORE_TABS:
if '\t' in contents:
print("%s contains tabs" % name)
result = False
if not base in SourceProcessor.IGNORE_COPYRIGHTS and \
not any(ignore_dir in name for ignore_dir
in SourceProcessor.IGNORE_COPYRIGHTS_DIRECTORIES):
if not COPYRIGHT_HEADER_PATTERN.search(contents):
print("%s is missing a correct copyright header." % name)
result = False
if ' \n' in contents or contents.endswith(' '):
line = 0
lines = []
parts = contents.split(' \n')
if not contents.endswith(' '):
parts.pop()
for part in parts:
line += part.count('\n') + 1
lines.append(str(line))
linenumbers = ', '.join(lines)
if len(lines) > 1:
print("%s has trailing whitespaces in lines %s." % (name, linenumbers))
else:
print("%s has trailing whitespaces in line %s." % (name, linenumbers))
result = False
if not contents.endswith('\n') or contents.endswith('\n\n'):
print("%s does not end with a single new line." % name)
result = False
# Sanitize flags for fuzzer.
if (".js" in name or ".mjs" in name) and ("mjsunit" in name or "debugger" in name):
match = FLAGS_LINE.search(contents)
if match:
print("%s Flags should use '-' (not '_')" % name)
result = False
if (not "mjsunit/mjsunit.js" in name and
not "mjsunit/mjsunit_numfuzz.js" in name):
if ASSERT_OPTIMIZED_PATTERN.search(contents) and \
not FLAGS_ENABLE_OPT.search(contents):
print("%s Flag --opt should be set if " \
"assertOptimized() is used" % name)
result = False
if ASSERT_UNOPTIMIZED_PATTERN.search(contents) and \
not FLAGS_NO_ALWAYS_OPT.search(contents):
print("%s Flag --no-always-opt should be set if " \
"assertUnoptimized() is used" % name)
result = False
match = self.runtime_function_call_pattern.search(contents)
if match:
print("%s has unexpected spaces in a runtime call '%s'" % (name, match.group(1)))
result = False
return result
def ProcessFiles(self, files):
success = True
violations = 0
for file in files:
try:
handle = open(file)
contents = handle.read()
if len(contents) > 0 and not self.ProcessContents(file, contents):
success = False
violations += 1
finally:
handle.close()
print("Total violating files: %s" % violations)
return success
def _CheckStatusFileForDuplicateKeys(filepath):
  """Return True iff the status file at filepath contains no duplicate keys.

  The file is massaged into valid JSON (comments stripped, trailing commas
  removed, keywords quoted) and parsed with a pair hook that flags duplicates.
  """
  comma_space_bracket = re.compile(", *]")
  lines = []
  with open(filepath) as f:
    for raw_line in f:
      # Skip all-comment lines.
      if raw_line.lstrip().startswith("#"):
        continue
      # Strip away comments at the end of the line.
      comment_start = raw_line.find("#")
      if comment_start != -1:
        raw_line = raw_line[:comment_start]
      stripped = raw_line.strip()
      # Strip away trailing commas within the line.
      stripped = comma_space_bracket.sub("]", stripped)
      if stripped:
        lines.append(stripped)

  # Strip away trailing commas at line ends. Ugh.
  for i in range(len(lines) - 1):
    next_line = lines[i + 1]
    if lines[i].endswith(",") and next_line and next_line[0] in ("}", "]"):
      lines[i] = lines[i][:-1]

  contents = "\n".join(lines)
  # JSON wants double-quotes.
  contents = contents.replace("'", '"')
  # Fill in keywords (like PASS, SKIP).
  for keyword in statusfile.KEYWORDS:
    contents = re.sub(r"\b%s\b" % keyword, '"%s"' % keyword, contents)

  status = {"success": True}
  def check_pairs(pairs):
    keys = {}
    for key, value in pairs:
      if key in keys:
        print("%s: Error: duplicate key %s" % (filepath, key))
        status["success"] = False
      keys[key] = True
  json.loads(contents, object_pairs_hook=check_pairs)
  return status["success"]
class StatusFilesProcessor(SourceFileProcessor):
  """Checks status files for incorrect syntax and duplicate keys."""

  def IsRelevant(self, name):
    # Several changes to files under the test directories could impact status
    # files.
    return True

  def GetPathsToSearch(self):
    return ['test', 'tools/testrunner']

  def ProcessFiles(self, files):
    """Run the syntax and duplicate-key checks over all affected status files."""
    success = True
    for status_file_path in sorted(self._GetStatusFiles(files)):
      success &= statusfile.PresubmitCheck(status_file_path)
      success &= _CheckStatusFileForDuplicateKeys(status_file_path)
    return success

  def _GetStatusFiles(self, files):
    """Map changed files to the set of status files they could affect."""
    test_path = join(dirname(TOOLS_PATH), 'test')
    testrunner_path = join(TOOLS_PATH, 'testrunner')
    status_files = set()

    # A testrunner change can affect every suite, so take all status files.
    if any(f.startswith(testrunner_path) for f in files):
      for suitepath in os.listdir(test_path):
        suitename = os.path.basename(suitepath)
        status_file = os.path.join(
            test_path, suitename, suitename + ".status")
        if os.path.exists(status_file):
          status_files.add(status_file)
      return status_files

    for file_path in files:
      if not file_path.startswith(test_path):
        continue
      # Strip off absolute path prefix pointing to test suites.
      pieces = file_path[len(test_path):].lstrip(os.sep).split(os.sep)
      if not pieces:
        continue
      # Infer affected status file name. Only care for existing status
      # files. Some directories under "test" don't have any.
      suite = pieces[0]
      if not os.path.isdir(join(test_path, suite)):
        continue
      status_file = join(test_path, suite, suite + ".status")
      if os.path.exists(status_file):
        status_files.add(status_file)
    return status_files
def CheckDeps(workspace):
  """Run buildtools' checkdeps over the workspace; True iff it reports clean."""
  checkdeps_script = join(workspace, 'buildtools', 'checkdeps', 'checkdeps.py')
  exit_code = subprocess.call([sys.executable, checkdeps_script, workspace])
  return exit_code == 0
def PyTests(workspace):
  """Run the bundled python unit-test scripts; True iff every one passes."""
  scripts = [
      join(workspace, 'tools', 'clusterfuzz', 'v8_foozzie_test.py'),
      join(workspace, 'tools', 'release', 'test_scripts.py'),
      join(workspace, 'tools', 'unittests', 'run_tests_test.py'),
      join(workspace, 'tools', 'unittests', 'run_perf_test.py'),
      join(workspace, 'tools', 'testrunner', 'testproc', 'variant_unittest.py'),
  ]
  result = True
  for script in scripts:
    print('Running ' + script)
    exit_code = subprocess.call(
        [sys.executable, script], stdout=subprocess.PIPE)
    result &= exit_code == 0
  return result
def GetOptions():
  """Build the command-line parser for this presubmit script."""
  parser = optparse.OptionParser()
  parser.add_option('--no-lint', help="Do not run cpplint", default=False,
                    action="store_true")
  parser.add_option('--no-linter-cache', help="Do not cache linter results",
                    default=False, action="store_true")
  return parser
def Main():
  """Run every presubmit check; return a process exit code (0 on success)."""
  workspace = abspath(join(dirname(sys.argv[0]), '..'))
  options, _ = GetOptions().parse_args()
  use_linter_cache = not options.no_linter_cache

  success = True
  print("Running checkdeps...")
  success &= CheckDeps(workspace)
  if not options.no_lint:
    print("Running C++ lint check...")
    success &= CppLintProcessor(use_cache=use_linter_cache).RunOnPath(workspace)
  print("Running Torque formatting check...")
  success &= TorqueLintProcessor(use_cache=use_linter_cache).RunOnPath(
      workspace)
  print("Running copyright header, trailing whitespaces and "
        "two empty lines between declarations check...")
  success &= SourceProcessor().RunOnPath(workspace)
  print("Running status-files check...")
  success &= StatusFilesProcessor().RunOnPath(workspace)
  print("Running python tests...")
  success &= PyTests(workspace)
  return 0 if success else 1
# Script entry point: exit status 0 when all checks pass, 1 otherwise.
if __name__ == '__main__':
  sys.exit(Main())
| bsd-3-clause |
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/third_party/scons-2.0.1/engine/SCons/exitfuncs.py | 61 | 2402 | """SCons.exitfuncs
Register functions which are executed when SCons exits for any reason.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/exitfuncs.py 5134 2010/08/16 23:02:40 bdeegan"
# LIFO stack of (function, positional_args, keyword_args) records added by
# register(); consumed in reverse order by _run_exitfuncs().
_exithandlers = []
def _run_exitfuncs():
    """run any registered exit functions

    _exithandlers is traversed in reverse order so functions are executed
    last in, first out.
    """
    # pop() drains the stack from the end, giving LIFO execution order.
    while _exithandlers:
        handler, positional, keywords = _exithandlers.pop()
        handler(*positional, **keywords)
def register(func, *targs, **kargs):
    """register a function to be executed upon normal program termination

    func - function to be called at exit
    targs - optional arguments to pass to func
    kargs - optional keyword arguments to pass to func
    """
    record = (func, targs, kargs)
    _exithandlers.append(record)
import sys

try:
    # sys.exitfunc is the Python-2 exit hook; on Python 3 this lookup raises
    # AttributeError and the chaining below is skipped.
    x = sys.exitfunc

    # if x isn't our own exit func executive, assume it's another
    # registered exit function - append it to our list...
    if x != _run_exitfuncs:
        register(x)
except AttributeError:
    pass

# make our exit function get run by python when it exits:
sys.exitfunc = _run_exitfuncs

# Avoid leaving 'sys' as a module-level name of this module.
del sys
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
LighthouseUK/koalacore | koalacore/api.py | 1 | 16622 | # -*- coding: utf-8 -*-
"""
koala.api
~~~~~~~~~~~~~~~~~~
Contains base implementations for building an internal project API
:copyright: (c) 2015 Lighthouse
:license: LGPL
"""
from blinker import signal
from google.appengine.ext import deferred
__author__ = 'Matt Badger'
# TODO: remove the deferred library dependency; extend the BaseAPI in an App Engine specific module to include deferred.
# TODO: it is possible that these methods will fail and thus their result will be None. Passing this in a signal may
# cause other functions to throw exceptions. Check the return value before processing the post_ signals?
# Result should always be the first argument to the post_ signals. That way the receivers can check the value before
# continuing execution.
class BaseAPI(object):
    """Generic CRUD+search facade over a datastore and a search index.

    Subclasses set the four class attributes below. Every operation emits
    blinker 'pre_*' / 'post_*' signals (only when receivers are connected)
    and defers search-index maintenance to the 'search-index-update' queue.
    All operations are classmethods; the class is never instantiated.
    """
    # Human-readable API name, for use by subclasses and signal receivers.
    _api_name = ''
    # Resource model class constructed by new().
    _api_model = None
    # Object providing insert/get/update/patch/delete against the datastore.
    _datastore_interface = None
    # Object providing insert/search/delete against the search index.
    _search_interface = None

    @classmethod
    def new(cls, **kwargs):
        """Instantiate (but do not persist) a new resource model."""
        return cls._api_model(**kwargs)

    @classmethod
    def insert(cls, resource_object, auth_uid=None, **kwargs):
        """Persist a new resource; returns its uid.

        Emits pre_insert/post_insert and queues a deferred search reindex.
        """
        if signal('pre_insert').has_receivers_for(cls):
            signal('pre_insert').send(cls, resource_object=resource_object, auth_uid=auth_uid, **kwargs)
        resource_uid = cls._datastore_interface.insert(resource_object=resource_object, **kwargs)
        # Search index writes are deferred so the datastore write stays fast.
        deferred.defer(cls._update_search_index, resource_uid=resource_uid, _queue='search-index-update')
        if signal('post_insert').has_receivers_for(cls):
            # NOTE(review): 'result' may be None if the datastore call failed;
            # receivers should check it before acting (see module TODO).
            signal('post_insert').send(cls, result=resource_uid, resource_uid=resource_uid,
                                       resource_object=resource_object, auth_uid=auth_uid, **kwargs)
        return resource_uid

    @classmethod
    def get(cls, resource_uid, **kwargs):
        """Fetch a resource by uid, emitting pre_get/post_get signals."""
        if signal('pre_get').has_receivers_for(cls):
            signal('pre_get').send(cls, resource_uid=resource_uid, **kwargs)
        resource = cls._datastore_interface.get(resource_uid=resource_uid)
        if signal('post_get').has_receivers_for(cls):
            signal('post_get').send(cls, result=resource, resource_uid=resource_uid, **kwargs)
        return resource

    @classmethod
    def update(cls, resource_object, auth_uid=None, **kwargs):
        """Store a full update of an existing resource; returns its uid."""
        if signal('pre_update').has_receivers_for(cls):
            signal('pre_update').send(cls, resource_object=resource_object, auth_uid=auth_uid, **kwargs)
        resource_uid = cls._datastore_interface.update(resource_object=resource_object, **kwargs)
        deferred.defer(cls._update_search_index, resource_uid=resource_uid, _queue='search-index-update')
        if signal('post_update').has_receivers_for(cls):
            signal('post_update').send(cls, result=resource_uid, resource_uid=resource_uid,
                                       resource_object=resource_object, auth_uid=auth_uid, **kwargs)
        return resource_uid

    @classmethod
    def patch(cls, resource_uid, delta_update, auth_uid=None, **kwargs):
        """Apply a partial (delta) update to a resource; returns its uid."""
        if signal('pre_patch').has_receivers_for(cls):
            signal('pre_patch').send(cls, resource_uid=resource_uid, delta_update=delta_update, auth_uid=auth_uid,
                                     **kwargs)
        resource_uid = cls._datastore_interface.patch(resource_uid=resource_uid, delta_update=delta_update, **kwargs)
        deferred.defer(cls._update_search_index, resource_uid=resource_uid, _queue='search-index-update')
        if signal('post_patch').has_receivers_for(cls):
            signal('post_patch').send(cls, result=resource_uid, resource_uid=resource_uid, delta_update=delta_update,
                                      auth_uid=auth_uid, **kwargs)
        return resource_uid

    @classmethod
    def delete(cls, resource_uid, auth_uid=None, **kwargs):
        """Delete a resource and queue removal from the search index."""
        if signal('pre_delete').has_receivers_for(cls):
            signal('pre_delete').send(cls, resource_uid=resource_uid, auth_uid=auth_uid, **kwargs)
        cls._datastore_interface.delete(resource_uid=resource_uid, **kwargs)
        deferred.defer(cls._delete_search_index, resource_uid=resource_uid, _queue='search-index-update')
        if signal('post_delete').has_receivers_for(cls):
            signal('post_delete').send(cls, result=None, resource_uid=resource_uid, auth_uid=auth_uid, **kwargs)

    @classmethod
    def search(cls, query_string, **kwargs):
        """Query the search index; returns the raw search result object."""
        if signal('pre_search').has_receivers_for(cls):
            signal('pre_search').send(cls, query_string=query_string, **kwargs)
        search_result = cls._search_interface.search(query_string=query_string, **kwargs)
        if signal('post_search').has_receivers_for(cls):
            signal('post_search').send(cls, result=search_result, query_string=query_string, **kwargs)
        return search_result

    @classmethod
    def _update_search_index(cls, resource_uid, **kwargs):
        # Deferred task body: re-fetch the resource and (re)index it.
        resource = cls.get(resource_uid=resource_uid)
        cls._search_interface.insert(resource_object=resource, **kwargs)

    @classmethod
    def _delete_search_index(cls, resource_uid, **kwargs):
        # Deferred task body: drop the resource from the search index.
        cls._search_interface.delete(resource_object_uid=resource_uid, **kwargs)
class BaseSubAPI(object):
    """API bound to a parent BaseAPI, exposing a patch() restricted to a
    whitelist of keys (e.g. for less-privileged callers)."""
    _api_name = ''
    # The BaseAPI subclass whose datastore/search interfaces are reused.
    _parent_api = None
    # The only keys patch() is permitted to modify.
    _allowed_patch_keys = set()

    @classmethod
    def _parse_patch_keys(cls, delta_update):
        """Raise ValueError if delta_update touches any non-whitelisted key."""
        delta_keys = set(delta_update.keys())
        unauthorized_keys = delta_keys - cls._allowed_patch_keys
        if unauthorized_keys:
            raise ValueError(u'Cannot perform patch as "{}" are unauthorized keys'.format(unauthorized_keys))

    @classmethod
    def patch(cls, resource_uid, delta_update, **kwargs):
        """Validate the delta, then patch via the parent API's datastore
        interface; mirrors BaseAPI.patch (signals + deferred reindex)."""
        cls._parse_patch_keys(delta_update=delta_update)
        if signal('pre_patch').has_receivers_for(cls):
            signal('pre_patch').send(cls, resource_uid=resource_uid, delta_update=delta_update, **kwargs)
        resource_uid = cls._parent_api._datastore_interface.patch(resource_uid=resource_uid, delta_update=delta_update,
                                                                  **kwargs)
        deferred.defer(cls._parent_api._update_search_index, resource_uid=resource_uid, _queue='search-index-update')
        if signal('post_patch').has_receivers_for(cls):
            signal('post_patch').send(cls, result=resource_uid, resource_uid=resource_uid, delta_update=delta_update,
                                      **kwargs)
        return resource_uid
class BaseResourceProperty(object):
    """A data descriptor that sets and returns values normally but also includes a title attribute and assorted filters.

    You can inherit from this class to create custom property types
    """
    _name = None
    _default = None
    title = None

    _attributes = ['_name', '_default', 'title']
    _positional = 1  # Only name is a positional argument.

    def __init__(self, name=None, default=None, title=''):
        self._name = name  # name should conform to python class attribute naming conventions
        self._default = default
        self.title = title

    def __repr__(self):
        """Return a compact unambiguous string representation of a property."""
        cls = self.__class__
        rendered = []
        for index, attr in enumerate(self._attributes):
            val = getattr(self, attr)
            # Skip attributes still at their class-level default.
            if val is getattr(cls, attr):
                continue
            piece = val.__name__ if isinstance(val, type) else repr(val)
            if index >= cls._positional:
                keyword = attr[1:] if attr.startswith('_') else attr
                piece = '%s=%s' % (keyword, piece)
            rendered.append(piece)
        return '%s(%s)' % (cls.__name__, ', '.join(rendered))

    def __get__(self, entity, unused_cls=None):
        """Descriptor protocol: get the value from the entity."""
        # __get__ invoked on the class itself returns the descriptor.
        if entity is None:
            return self
        return entity._values.get(self._name, self._default)

    def __set__(self, entity, value):
        """Descriptor protocol: set the value on the entity."""
        entity._values[self._name] = value

    def _fix_up(self, cls, code_name):
        """Internal helper called to tell the property its name.

        This is called by _fix_up_properties() which is called by
        MetaModel when finishing the construction of a Model subclass.

        The name passed in is the name of the class attribute to which the
        Property is assigned (a.k.a. the code name). Note that this means
        that each Property instance must be assigned to (at most) one
        class attribute. E.g. to declare three strings, you must call
        StringProperty() three times, you cannot write

        foo = bar = baz = StringProperty()
        """
        if self._name is None:
            self._name = code_name

    def _has_value(self, entity, unused_rest=None):
        """Internal helper to ask if the entity has a value for this Property."""
        return self._name in entity._values
class ResourceProperty(BaseResourceProperty):
    _attributes = BaseResourceProperty._attributes + ['_immutable', '_unique', '_strip', '_lower']

    def __init__(self, immutable=False, unique=False, track_revisions=True, strip_whitespace=True,
                 force_lowercase=False, **kwargs):
        super(ResourceProperty, self).__init__(**kwargs)
        self._immutable = immutable
        self._unique = unique
        self._track_revisions = track_revisions
        self._strip = strip_whitespace
        self._lower = force_lowercase

    def __set__(self, entity, value):
        """Descriptor protocol: set the value on the entity."""
        # Immutability is only enforced after __init__ has finished, so that
        # initial values can still be assigned.
        if entity._init_complete and self._immutable:
            raise AssertionError('"{}" is immutable.'.format(self._name))

        if self._strip and value is not None:
            if hasattr(value, 'strip'):
                value = value.strip()
            elif isinstance(value, list):
                try:
                    value = [element.strip() for element in value]
                except AttributeError:
                    # The value cannot simply be stripped. Custom formatting
                    # should be used in a dedicated method.
                    pass
            elif isinstance(value, set):
                try:
                    value = set([element.strip() for element in list(value)])
                except AttributeError:
                    # The value cannot simply be stripped. Custom formatting
                    # should be used in a dedicated method.
                    pass

        if self._lower and value is not None:
            if hasattr(value, 'lower'):
                value = value.lower()
            elif isinstance(value, list):
                try:
                    value = [element.lower() for element in value]
                except AttributeError:
                    # The value cannot simply be lowered. Custom formatting
                    # should be used in a dedicated method.
                    pass

        if entity._init_complete:
            if self._unique:
                entity._uniques_modified.append(self._name)
            if self._track_revisions:
                # Keep (original_value, latest_value); the original survives
                # multiple writes within one revision window.
                if self._name in entity._history:
                    entity._history[self._name] = (entity._history[self._name][0], value)
                else:
                    entity._history[self._name] = (getattr(entity, self._name, None), value)

        super(ResourceProperty, self).__set__(entity=entity, value=value)
class ComputedResourceProperty(BaseResourceProperty):
    _attributes = BaseResourceProperty._attributes + ['_compute_function']

    def __init__(self, compute_function, **kwargs):
        super(ComputedResourceProperty, self).__init__(**kwargs)
        # Callable taking the entity and returning the derived value.
        self._compute_function = compute_function

    def __get__(self, entity, unused_cls=None):
        """Descriptor protocol: get the value from the entity."""
        # __get__ invoked on the class itself returns the descriptor.
        if entity is None:
            return self
        return self._compute_function(entity)
class MetaModel(type):
    """Metaclass for Model.

    This exists to fix up the properties -- they need to know their name.
    This is accomplished by calling the class's _fix_properties() method.

    Note: This class is derived from Google's NDB MetaModel (line 2838 in model.py)
    """

    def __init__(cls, name, bases, classdict):
        super(MetaModel, cls).__init__(name, bases, classdict)
        cls._fix_up_properties()

    def __repr__(cls):
        props = []
        for _, prop in sorted(cls._properties.iteritems()):
            # Bug fix: properties in this module store their attribute name in
            # '_name' (set by _fix_up()); '_code_name' is never assigned, so
            # repr() previously raised AttributeError.
            props.append('%s=%r' % (prop._name, prop))
        return '%s<%s>' % (cls.__name__, ', '.join(props))
class BaseResource(object):
    """
    Base resource object. You have to implement some of the functionality yourself.

    You must call super(Resource, self).__init__() first in your init method.

    Immutable properties must be set within init otherwise it makes it impossible to set initial values.
    If a property is required then make sure that you check it during init and throw an exception.
    """
    __metaclass__ = MetaModel
    # Filled in by _fix_up_properties(): {property _name: Property instance}.
    _properties = None
    # Filled in by _fix_up_properties(): names of properties flagged unique.
    _uniques = None

    def __init__(self, **kwargs):
        # _init_complete gates property side effects (immutability checks,
        # revision tracking) until initial values have been assigned.
        self._init_complete = False
        self._values = {}            # property _name -> current value
        self._uniques_modified = []  # unique properties modified after init
        self._history = {}           # property _name -> (old_value, new_value)
        self._set_attributes(kwargs)
        self._init_complete = True

    def _set_attributes(self, kwds):
        """Internal helper to set attributes from keyword arguments.

        Expando overrides this.
        """
        cls = self.__class__
        for name, value in kwds.iteritems():
            prop = getattr(cls, name)  # Raises AttributeError for unknown properties.
            if not isinstance(prop, BaseResourceProperty):
                raise TypeError('Cannot set non-property %s' % name)
            prop.__set__(self, value)

    def __repr__(self):
        """Return an unambiguous string representation of an entity."""
        args = []
        for prop in self._properties.itervalues():
            # Only include properties that actually hold a value.
            if prop._has_value(self):
                val = prop.__get__(self)
                if val is None:
                    rep = 'None'
                else:
                    rep = val
                args.append('%s=%s' % (prop._name, rep))
        args.sort()
        s = '%s(%s)' % (self.__class__.__name__, ', '.join(args))
        return s

    def _as_dict(self):
        """Return a dict containing the entity's property values.
        """
        return self._values.copy()

    # Public alias for _as_dict().
    as_dict = _as_dict

    @classmethod
    def _fix_up_properties(cls):
        """Fix up the properties by calling their _fix_up() method.

        Note: This is called by MetaModel, but may also be called manually
        after dynamically updating a model class.
        """
        cls._properties = {}  # Map of {name: Property}
        cls._uniques = []  # Map of {name: Property}
        if cls.__module__ == __name__:  # Skip the classes in *this* file.
            return
        for name in set(dir(cls)):
            attr = getattr(cls, name, None)
            if isinstance(attr, BaseResourceProperty):
                if name.startswith('_'):
                    raise TypeError('ModelAttribute %s cannot begin with an underscore '
                                    'character. _ prefixed attributes are reserved for '
                                    'temporary Model instance values.' % name)
                attr._fix_up(cls, name)
                cls._properties[attr._name] = attr
                try:
                    if attr._unique:
                        cls._uniques.append(attr._name)
                except AttributeError:
                    # Properties without a _unique flag (e.g. computed
                    # properties) simply aren't tracked as uniques.
                    pass
class Resource(BaseResource):
    """
    Default implementation of a resource. It handles uid, created and updated properties. The latter two are simply
    timestamps.

    Due to the way these objects are used, the properties cannot be mandatory. For example, the uid may be set by the
    datastore on insert. Same goes for the timestamps.
    """
    # ResourceProperty signature reminder:
    # name=None, default=None, title='', immutable=False, unique=False, track_revisions=True, strip_whitespace=True, force_lowercase=False
    # All three are written by the datastore layer, hence immutable and
    # excluded from revision tracking.
    uid = ResourceProperty(title=u'UID', immutable=True, track_revisions=False)
    created = ResourceProperty(title=u'Created', immutable=True, track_revisions=False)
    updated = ResourceProperty(title=u'Updated', immutable=True, track_revisions=False)
| lgpl-3.0 |
super7ramp/pulseaudio-dlna | pulseaudio_dlna/notification.py | 9 | 1344 | #!/usr/bin/python
# This file is part of pulseaudio-dlna.
# pulseaudio-dlna is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pulseaudio-dlna is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pulseaudio-dlna. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import logging
import notify2
# Module-level logger; handlers/levels are configured by the application.
logger = logging.getLogger('pulseaudio_dlna.notification')
def show(title, message, icon=''):
    """Display a desktop notification via notify2, best-effort.

    Falls back to logging the message when the notification daemon is
    unavailable or rejects the request.

    :param title: notification summary line.
    :param message: notification body text.
    :param icon: optional icon name or path; empty string shows no icon.
    """
    try:
        notice = notify2.Notification(title, message, icon)
        notice.set_timeout(notify2.EXPIRES_DEFAULT)
        notice.show()
    except Exception:
        # Narrowed from a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt; the best-effort behaviour is preserved.
        logger.info(
            'notify2 failed to display: {title} - {message}'.format(
                title=title,
                message=message))
# Initialize notify2 once at import time; failures are non-fatal by design.
try:
    notify2.init('pulseaudio_dlna')
except Exception:
    # Narrowed from a bare 'except:' so interpreter-exit exceptions
    # (SystemExit, KeyboardInterrupt) still propagate.
    logger.error('notify2 could not be initialized! Notifications will '
                 'most likely not work.')
| gpl-3.0 |
liamstask/c-capnproto | gtest-1.7.0/test/gtest_output_test.py | 1733 | 12005 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Testing Framework.
SYNOPSIS
gtest_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gtest_output_test_ file.
gtest_output_test.py --gengolden
gtest_output_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'

import os
import re
import sys

import gtest_test_utils

# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'

IS_WINDOWS = os.name == 'nt'

# TODO(vladl@google.com): remove the _lin suffix.
GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'

PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')

# Each command is an (extra_environment, argv) pair; see GetShellCommandOutput.
# At least one command we exercise must not have the
# --gtest_internal_skip_environment_and_ad_hoc_tests flag.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
                          '--gtest_print_time',
                          '--gtest_internal_skip_environment_and_ad_hoc_tests',
                          '--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
    {}, [PROGRAM_PATH,
         '--gtest_also_run_disabled_tests',
         '--gtest_internal_skip_environment_and_ad_hoc_tests',
         '--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
    {'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
    [PROGRAM_PATH,
     '--gtest_internal_skip_environment_and_ad_hoc_tests',
     '--gtest_filter=PassingTest.*'])

GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
  """Changes all Windows/Mac line endings in s to UNIX line endings."""
  # Convert CRLF first so the lone-CR pass does not produce doubled newlines.
  unix_text = s.replace('\r\n', '\n')
  return unix_text.replace('\r', '\n')
def RemoveLocations(test_output):
  """Removes all file location info from a Google Test program's output.

  Args:
    test_output: the output of a Google Test program.

  Returns:
    output with all file location info (in the form of
    'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
    'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
    'FILE_NAME:#: '.
  """
  location_pattern = r'.*[/\\](.+)(\:\d+|\(\d+\))\: '
  return re.sub(location_pattern, r'\1:#: ', test_output)
def RemoveStackTraceDetails(output):
  """Removes all stack traces from a Google Test program's output."""
  # *? means "find the shortest string that matches": each trace runs up to
  # its terminating blank line.
  placeholder = 'Stack trace: (omitted)\n\n'
  return re.sub(r'Stack trace:(.|\n)*?\n\n', placeholder, output)
def RemoveStackTraces(output):
  """Removes all traces of stack traces from a Google Test program's output."""
  # Non-greedy match deletes each trace up to and including its blank line.
  return re.sub(r'Stack trace:(.|\n)*?\n\n', '', output)
def RemoveTime(output):
  """Removes all time information from a Google Test program's output."""
  timing = re.compile(r'\(\d+ ms')
  return timing.sub('(? ms', output)
def RemoveTypeInfoDetails(test_output):
  """Removes compiler-specific type info from Google Test program's output.

  Args:
    test_output: the output of a Google Test program.

  Returns:
    output with type information normalized to canonical form.
  """
  # some compilers output the name of type 'unsigned int' as 'unsigned'
  return test_output.replace('unsigned int', 'unsigned')
def NormalizeToCurrentPlatform(test_output):
  """Normalizes platform specific output details for easier comparison."""
  if not IS_WINDOWS:
    return test_output
  # Removes the color information that is not present on Windows.
  test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
  # Changes failure message headers into the Windows format.
  test_output = re.sub(r': Failure\n', r': error: ', test_output)
  # Changes file(line_number) to file:line_number.
  test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)
  return test_output
def RemoveTestCounts(output):
  """Removes test counts from a Google Test program's output."""
  # Applied in order: the more specific patterns must run before the
  # catch-all '\d+ tests.' replacement.
  replacements = [
      (r'\d+ tests?, listed below', '? tests, listed below'),
      (r'\d+ FAILED TESTS', '? FAILED TESTS'),
      (r'\d+ tests? from \d+ test cases?', '? tests from ? test cases'),
      (r'\d+ tests? from ([a-zA-Z_])', r'? tests from \1'),
      (r'\d+ tests?\.', '? tests.'),
  ]
  for pattern, replacement in replacements:
    output = re.sub(pattern, replacement, output)
  return output
def RemoveMatchingTests(test_output, pattern):
  """Removes output of specified tests from a Google Test program's output.

  This function strips not only the beginning and the end of a test but also
  all output in between.

  Args:
    test_output: A string containing the test output.
    pattern: A regex string that matches names of test cases or
             tests to remove.

  Returns:
    Contents of test_output with tests whose names match pattern removed.
  """
  # First drop every [ RUN ] ... [ FAILED / OK ] section of a matching test,
  # then drop any stray single lines that still mention it.
  section_re = (r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n'
                % (pattern, pattern))
  test_output = re.sub(section_re, '', test_output)
  return re.sub(r'.*%s.*\n' % pattern, '', test_output)
def NormalizeOutput(output):
  """Normalizes output (the output of gtest_output_test_.exe)."""
  # Apply each normalization pass in order; each one strips a class of
  # environment-dependent detail from the output.
  for normalize in (ToUnixLineEnding, RemoveLocations,
                    RemoveStackTraceDetails, RemoveTime):
    output = normalize(output)
  return output
def GetShellCommandOutput(env_cmd):
  """Runs a command in a sub-process and returns its output in a string.

  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
             environment variables to set, and element 1 is a string with
             the command and any flags.

  Returns:
    A string with the command's combined standard and diagnostic output.
  """
  extra_env, cmdline = env_cmd
  # Run the command with the caller's environment augmented by extra_env.
  merged_environ = os.environ.copy()
  merged_environ.update(extra_env)
  process = gtest_test_utils.Subprocess(cmdline, env=merged_environ)
  return process.output
def GetCommandOutput(env_cmd):
  """Runs a command and returns its normalized output.

  All file location info is stripped from the returned output.

  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
             environment variables to set, and element 1 is a string with
             the command and any flags.
  """
  extra_env, cmdline = env_cmd
  # Copy before modifying so the caller's dict is untouched, then disable
  # exception pop-ups on Windows via the catch-exceptions env var.
  patched_env = dict(extra_env)
  patched_env[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
  raw_output = GetShellCommandOutput((patched_env, cmdline))
  return NormalizeOutput(raw_output)
def GetOutputOfAllCommands():
  """Returns concatenated output from several representative commands."""
  commands = (COMMAND_WITH_COLOR,
              COMMAND_WITH_TIME,
              COMMAND_WITH_DISABLED,
              COMMAND_WITH_SHARDING)
  # Concatenation order matches the golden file's layout.
  return ''.join(GetCommandOutput(command) for command in commands)
# Probe the test binary once, by listing its tests, to discover which
# optional features it was built with.
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)

SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
# NOTE(review): stack traces are assumed unsupported unconditionally here —
# confirm whether a probe is intended.
SUPPORTS_STACK_TRACES = False

# The golden file can only be regenerated by a binary that supports every
# optional feature; otherwise parts of the expected output would be missing.
CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
                            SUPPORTS_TYPED_TESTS and
                            SUPPORTS_THREADS)
class GTestOutputTest(gtest_test_utils.TestCase):
  """Compares the test programs' combined output against a golden file."""

  def RemoveUnsupportedTests(self, test_output):
    """Strips from the golden output every test this binary cannot run."""
    unsupported_patterns = []
    if not SUPPORTS_DEATH_TESTS:
      unsupported_patterns.append('DeathTest')
    if not SUPPORTS_TYPED_TESTS:
      unsupported_patterns.extend(
          ['TypedTest', 'TypedDeathTest', 'TypeParamDeathTest'])
    if not SUPPORTS_THREADS:
      unsupported_patterns.extend(['ExpectFailureWithThreadsTest',
                                   'ScopedFakeTestPartResultReporterTest',
                                   'WorksConcurrently'])
    for pattern in unsupported_patterns:
      test_output = RemoveMatchingTests(test_output, pattern)
    if not SUPPORTS_STACK_TRACES:
      test_output = RemoveStackTraces(test_output)
    return test_output

  def testOutput(self):
    actual = GetOutputOfAllCommands()

    # A mis-configured source control system can cause \r to appear in EOL
    # sequences when we read the golden file, irrespective of the operating
    # system used, so strip those \r's unconditionally.
    golden_file = open(GOLDEN_PATH, 'rb')
    golden = ToUnixLineEnding(golden_file.read())
    golden_file.close()

    # We still have to remove type name specifics in all cases.
    normalized_actual = RemoveTypeInfoDetails(actual)
    normalized_golden = RemoveTypeInfoDetails(golden)

    if CAN_GENERATE_GOLDEN_FILE:
      self.assertEqual(normalized_golden, normalized_actual)
      return

    # We want the test to pass regardless of certain features being
    # supported or not, so normalize both sides further.
    normalized_actual = NormalizeToCurrentPlatform(
        RemoveTestCounts(normalized_actual))
    normalized_golden = NormalizeToCurrentPlatform(
        RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))

    # Very handy when debugging golden file differences:
    if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
      for file_name, contents in (
          ('_gtest_output_test_normalized_actual.txt', normalized_actual),
          ('_gtest_output_test_normalized_golden.txt', normalized_golden)):
        open(os.path.join(gtest_test_utils.GetSourceDir(), file_name),
             'wb').write(contents)

    self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
  # When invoked with GENGOLDEN_FLAG as the sole argument, regenerate the
  # golden file instead of running the comparison test.
  if sys.argv[1:] == [GENGOLDEN_FLAG]:
    if CAN_GENERATE_GOLDEN_FILE:
      output = GetOutputOfAllCommands()
      golden_file = open(GOLDEN_PATH, 'wb')
      golden_file.write(output)
      golden_file.close()
    else:
      # A binary missing optional features would write an incomplete golden
      # file, so refuse and exit non-zero.
      message = (
          """Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests, typed tests,
and multiple threads). Please generate the golden file using a binary built
with those features enabled.""")

      sys.stderr.write(message)
      sys.exit(1)
  else:
    gtest_test_utils.Main()
| mit |
JamesMura/sentry | src/sentry/api/endpoints/group_notes.py | 6 | 2146 | from __future__ import absolute_import
from datetime import timedelta
from django.utils import timezone
from rest_framework import status
from rest_framework.response import Response
from sentry.api.base import DocSection
from sentry.api.bases.group import GroupEndpoint
from sentry.api.serializers import serialize
from sentry.api.serializers.rest_framework.group_notes import NoteSerializer
from sentry.models import Activity, GroupSubscription, GroupSubscriptionReason
from sentry.utils.functional import extract_lazy_object
class GroupNotesEndpoint(GroupEndpoint):
    """Lists and creates notes (comments) on a group."""
    doc_section = DocSection.EVENTS

    def get(self, request, group):
        """Returns the group's notes as a paginated list, newest first."""
        notes = Activity.objects.filter(
            group=group,
            type=Activity.NOTE,
        ).select_related('user')

        return self.paginate(
            request=request,
            queryset=notes,
            # TODO(dcramer): we want to sort by datetime
            order_by='-id',
            on_results=lambda x: serialize(x, request.user),
        )

    def post(self, request, group):
        """Creates a note on the group and subscribes the author to it.

        Returns 400 when the payload is invalid or when the same user
        posted an identical comment within the last hour (dupe guard).
        """
        serializer = NoteSerializer(data=request.DATA)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

        data = dict(serializer.object)

        # Reject an identical comment by the same user within the last hour.
        if Activity.objects.filter(
            group=group,
            type=Activity.NOTE,
            user=request.user,
            data=data,
            datetime__gte=timezone.now() - timedelta(hours=1)
        ).exists():
            # BUG FIX: this previously passed a pre-serialized JSON *string*
            # to Response, which the renderer would JSON-encode again,
            # double-encoding the body. Pass a dict and let DRF serialize it.
            return Response({'detail': 'You have already posted that comment.'},
                            status=status.HTTP_400_BAD_REQUEST)

        # Commenting implicitly subscribes the author to the group.
        GroupSubscription.objects.subscribe(
            group=group,
            user=request.user,
            reason=GroupSubscriptionReason.comment,
        )

        activity = Activity.objects.create(
            group=group,
            project=group.project,
            type=Activity.NOTE,
            user=extract_lazy_object(request.user),
            data=data,
        )

        activity.send_notification()

        return Response(serialize(activity, request.user), status=201)
| bsd-3-clause |
HonzaKral/django | tests/template_tests/filter_tests/test_truncatewords_html.py | 386 | 1607 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.template.defaultfilters import truncatewords_html
from django.test import SimpleTestCase
class FunctionTests(SimpleTestCase):
    # Fixture shared by most of the cases below.
    SAMPLE = '<p>one <a href="#">two - three <br>four</a> five</p>'

    def assertTruncates(self, word_count, expected):
        """Asserts truncating SAMPLE to word_count words yields expected."""
        self.assertEqual(truncatewords_html(self.SAMPLE, word_count), expected)

    def test_truncate_zero(self):
        self.assertTruncates(0, '')

    def test_truncate(self):
        self.assertTruncates(2, '<p>one <a href="#">two ...</a></p>')

    def test_truncate2(self):
        self.assertTruncates(4, '<p>one <a href="#">two - three <br>four ...</a></p>')

    def test_truncate3(self):
        # Exactly the word count: the input is returned unchanged.
        self.assertTruncates(5, '<p>one <a href="#">two - three <br>four</a> five</p>')

    def test_truncate4(self):
        # More than the word count: the input is returned unchanged.
        self.assertTruncates(100, '<p>one <a href="#">two - three <br>four</a> five</p>')

    def test_truncate_unicode(self):
        self.assertEqual(truncatewords_html('\xc5ngstr\xf6m was here', 1), '\xc5ngstr\xf6m ...')

    def test_truncate_complex(self):
        self.assertEqual(
            truncatewords_html('<i>Buenos días! ¿Cómo está?</i>', 3),
            '<i>Buenos días! ¿Cómo ...</i>',
        )
| bsd-3-clause |
kvar/ansible | lib/ansible/modules/cloud/cloudstack/cs_zone.py | 24 | 9947 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Ansible module metadata: stable interface, maintained by the community.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_zone
short_description: Manages zones on Apache CloudStack based clouds.
description:
- Create, update and remove zones.
version_added: '2.1'
author: René Moser (@resmo)
options:
name:
description:
- Name of the zone.
type: str
required: true
id:
description:
- uuid of the existing zone.
type: str
state:
description:
- State of the zone.
type: str
default: present
choices: [ present, enabled, disabled, absent ]
domain:
description:
- Domain the zone is related to.
- Zone is a public zone if not set.
type: str
network_domain:
description:
- Network domain for the zone.
type: str
network_type:
description:
- Network type of the zone.
type: str
default: Basic
choices: [ Basic, Advanced ]
dns1:
description:
- First DNS for the zone.
- Required if I(state=present)
type: str
dns2:
description:
- Second DNS for the zone.
type: str
internal_dns1:
description:
- First internal DNS for the zone.
- If not set I(dns1) will be used on I(state=present).
type: str
internal_dns2:
description:
- Second internal DNS for the zone.
type: str
dns1_ipv6:
description:
- First DNS for IPv6 for the zone.
type: str
dns2_ipv6:
description:
- Second DNS for IPv6 for the zone.
type: str
guest_cidr_address:
description:
- Guest CIDR address for the zone.
type: str
dhcp_provider:
description:
- DHCP provider for the Zone.
type: str
local_storage_enabled:
description:
- Whether to enable local storage for the zone or not.
type: bool
securitygroups_enabled:
description:
- Whether the zone is security group enabled or not.
type: bool
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- name: Ensure a zone is present
cs_zone:
name: ch-zrh-ix-01
dns1: 8.8.8.8
dns2: 8.8.4.4
network_type: basic
delegate_to: localhost
- name: Ensure a zone is disabled
cs_zone:
name: ch-zrh-ix-01
state: disabled
delegate_to: localhost
- name: Ensure a zone is enabled
cs_zone:
name: ch-zrh-ix-01
state: enabled
delegate_to: localhost
- name: Ensure a zone is absent
cs_zone:
name: ch-zrh-ix-01
state: absent
delegate_to: localhost
'''
RETURN = '''
---
id:
description: UUID of the zone.
returned: success
type: str
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
description: Name of the zone.
returned: success
type: str
sample: zone01
dns1:
description: First DNS for the zone.
returned: success
type: str
sample: 8.8.8.8
dns2:
description: Second DNS for the zone.
returned: success
type: str
sample: 8.8.4.4
internal_dns1:
description: First internal DNS for the zone.
returned: success
type: str
sample: 8.8.8.8
internal_dns2:
description: Second internal DNS for the zone.
returned: success
type: str
sample: 8.8.4.4
dns1_ipv6:
description: First IPv6 DNS for the zone.
returned: success
type: str
sample: "2001:4860:4860::8888"
dns2_ipv6:
description: Second IPv6 DNS for the zone.
returned: success
type: str
sample: "2001:4860:4860::8844"
allocation_state:
description: State of the zone.
returned: success
type: str
sample: Enabled
domain:
description: Domain the zone is related to.
returned: success
type: str
sample: ROOT
network_domain:
description: Network domain for the zone.
returned: success
type: str
sample: example.com
network_type:
description: Network type for the zone.
returned: success
type: str
sample: basic
local_storage_enabled:
description: Local storage offering enabled.
returned: success
type: bool
sample: false
securitygroups_enabled:
description: Security groups support is enabled.
returned: success
type: bool
sample: false
guest_cidr_address:
description: Guest CIDR address for the zone
returned: success
type: str
sample: 10.1.1.0/24
dhcp_provider:
description: DHCP provider for the zone
returned: success
type: str
sample: VirtualRouter
zone_token:
description: Zone token
returned: success
type: str
sample: ccb0a60c-79c8-3230-ab8b-8bdbe8c45bb7
tags:
description: List of resource tags associated with the zone.
returned: success
type: dict
sample: [ { "key": "foo", "value": "bar" } ]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together,
)
class AnsibleCloudStackZone(AnsibleCloudStack):
    """Implements zone lookup, create, update and delete against the
    CloudStack API, with Ansible check-mode support."""

    def __init__(self, module):
        super(AnsibleCloudStackZone, self).__init__(module)
        # Maps CloudStack API result keys to this module's return-value keys.
        self.returns = {
            'dns1': 'dns1',
            'dns2': 'dns2',
            'internaldns1': 'internal_dns1',
            'internaldns2': 'internal_dns2',
            'ipv6dns1': 'dns1_ipv6',
            'ipv6dns2': 'dns2_ipv6',
            'domain': 'network_domain',
            'networktype': 'network_type',
            'securitygroupsenabled': 'securitygroups_enabled',
            'localstorageenabled': 'local_storage_enabled',
            'guestcidraddress': 'guest_cidr_address',
            'dhcpprovider': 'dhcp_provider',
            'allocationstate': 'allocation_state',
            'zonetoken': 'zone_token',
        }
        # Cached zone dict; populated lazily by get_zone().
        self.zone = None

    def _get_common_zone_args(self):
        """Builds the argument dict shared by createZone and updateZone."""
        args = {
            'name': self.module.params.get('name'),
            'dns1': self.module.params.get('dns1'),
            'dns2': self.module.params.get('dns2'),
            'internaldns1': self.get_or_fallback('internal_dns1', 'dns1'),
            'internaldns2': self.get_or_fallback('internal_dns2', 'dns2'),
            'ipv6dns1': self.module.params.get('dns1_ipv6'),
            'ipv6dns2': self.module.params.get('dns2_ipv6'),
            'networktype': self.module.params.get('network_type'),
            'domain': self.module.params.get('network_domain'),
            'localstorageenabled': self.module.params.get('local_storage_enabled'),
            'guestcidraddress': self.module.params.get('guest_cidr_address'),
            'dhcpprovider': self.module.params.get('dhcp_provider'),
        }
        state = self.module.params.get('state')
        if state in ['enabled', 'disabled']:
            # The API expects 'Enabled'/'Disabled' for allocationstate.
            args['allocationstate'] = state.capitalize()
        return args

    def get_zone(self):
        """Returns the zone, looked up by id first, then by name; caches it."""
        if not self.zone:
            args = {}

            uuid = self.module.params.get('id')
            if uuid:
                args['id'] = uuid
                zones = self.query_api('listZones', **args)
                if zones:
                    self.zone = zones['zone'][0]
                    return self.zone

            # NOTE(review): when a lookup by id found nothing, args still
            # contains 'id' for this fallback lookup by name — confirm that
            # is intended.
            args['name'] = self.module.params.get('name')
            zones = self.query_api('listZones', **args)
            if zones:
                self.zone = zones['zone'][0]
        return self.zone

    def present_zone(self):
        """Ensures the zone exists, creating or updating as needed."""
        zone = self.get_zone()
        if zone:
            zone = self._update_zone()
        else:
            zone = self._create_zone()
        return zone

    def _create_zone(self):
        # dns1 is mandatory only when creating a zone, so it is validated
        # here rather than in the module's argument spec.
        required_params = [
            'dns1',
        ]
        self.module.fail_on_missing_params(required_params=required_params)

        self.result['changed'] = True

        args = self._get_common_zone_args()
        args['domainid'] = self.get_domain(key='id')
        args['securitygroupenabled'] = self.module.params.get('securitygroups_enabled')

        zone = None
        # In check mode, report the change without calling the API.
        if not self.module.check_mode:
            res = self.query_api('createZone', **args)
            zone = res['zone']
        return zone

    def _update_zone(self):
        zone = self.get_zone()

        args = self._get_common_zone_args()
        args['id'] = zone['id']

        # Only call the API when the desired args differ from current state.
        if self.has_changed(args, zone):
            self.result['changed'] = True

            if not self.module.check_mode:
                res = self.query_api('updateZone', **args)
                zone = res['zone']
        return zone

    def absent_zone(self):
        """Ensures the zone is removed; returns the zone it deleted, if any."""
        zone = self.get_zone()
        if zone:
            self.result['changed'] = True

            args = {
                'id': zone['id']
            }
            if not self.module.check_mode:
                self.query_api('deleteZone', **args)
        return zone
def main():
    """Module entry point: builds the argument spec and applies the state."""
    argument_spec = cs_argument_spec()
    # Module-specific options; connection options come from cs_argument_spec().
    argument_spec.update(dict(
        id=dict(),
        name=dict(required=True),
        dns1=dict(),
        dns2=dict(),
        internal_dns1=dict(),
        internal_dns2=dict(),
        dns1_ipv6=dict(),
        dns2_ipv6=dict(),
        network_type=dict(default='Basic', choices=['Basic', 'Advanced']),
        network_domain=dict(),
        guest_cidr_address=dict(),
        dhcp_provider=dict(),
        local_storage_enabled=dict(type='bool'),
        securitygroups_enabled=dict(type='bool'),
        state=dict(choices=['present', 'enabled', 'disabled', 'absent'], default='present'),
        domain=dict(),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )

    acs_zone = AnsibleCloudStackZone(module)

    state = module.params.get('state')
    # 'present', 'enabled' and 'disabled' all ensure the zone exists; the
    # enabled/disabled part is carried via allocationstate in the zone args.
    if state in ['absent']:
        zone = acs_zone.absent_zone()
    else:
        zone = acs_zone.present_zone()

    result = acs_zone.get_result(zone)
    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
saurabh6790/tru_app_back | hr/doctype/leave_application/leave_application.py | 29 | 13508 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes import _
from webnotes.utils import cint, cstr, date_diff, flt, formatdate, getdate, get_url_to_form, \
comma_or, get_fullname
from webnotes import msgprint
# Raised when an application falls on a blocked date the user may not approve.
class LeaveDayBlockedError(webnotes.ValidationError): pass
# Raised when an application overlaps an existing one for the same employee.
class OverlapError(webnotes.ValidationError): pass
# Raised when the chosen approver may not approve this employee's leave.
class InvalidLeaveApproverError(webnotes.ValidationError): pass
from webnotes.model.controller import DocListController
class DocType(DocListController):
def setup(self):
if webnotes.conn.exists(self.doc.doctype, self.doc.name):
self.previous_doc = webnotes.doc(self.doc.doctype, self.doc.name)
else:
self.previous_doc = None
def validate(self):
self.validate_to_date()
self.validate_balance_leaves()
self.validate_leave_overlap()
self.validate_max_days()
self.show_block_day_warning()
self.validate_block_days()
self.validate_leave_approver()
def on_update(self):
if (not self.previous_doc and self.doc.leave_approver) or (self.previous_doc and \
self.doc.status == "Open" and self.previous_doc.leave_approver != self.doc.leave_approver):
# notify leave approver about creation
self.notify_leave_approver()
elif self.previous_doc and \
self.previous_doc.status == "Open" and self.doc.status == "Rejected":
# notify employee about rejection
self.notify_employee(self.doc.status)
def on_submit(self):
if self.doc.status != "Approved":
webnotes.msgprint("""Only Leave Applications with status 'Approved' can be Submitted.""",
raise_exception=True)
# notify leave applier about approval
self.notify_employee(self.doc.status)
def on_cancel(self):
# notify leave applier about cancellation
self.notify_employee("cancelled")
def show_block_day_warning(self):
from hr.doctype.leave_block_list.leave_block_list import get_applicable_block_dates
block_dates = get_applicable_block_dates(self.doc.from_date, self.doc.to_date,
self.doc.employee, self.doc.company, all_lists=True)
if block_dates:
webnotes.msgprint(_("Warning: Leave application contains following block dates") + ":")
for d in block_dates:
webnotes.msgprint(formatdate(d.block_date) + ": " + d.reason)
def validate_block_days(self):
from hr.doctype.leave_block_list.leave_block_list import get_applicable_block_dates
block_dates = get_applicable_block_dates(self.doc.from_date, self.doc.to_date,
self.doc.employee, self.doc.company)
if block_dates:
if self.doc.status == "Approved":
webnotes.msgprint(_("Cannot approve leave as you are not authorized to approve leaves on Block Dates."))
raise LeaveDayBlockedError
def get_holidays(self):
tot_hol = webnotes.conn.sql("""select count(*) from `tabHoliday` h1, `tabHoliday List` h2, `tabEmployee` e1
where e1.name = %s and h1.parent = h2.name and e1.holiday_list = h2.name
and h1.holiday_date between %s and %s""", (self.doc.employee, self.doc.from_date, self.doc.to_date))
if not tot_hol:
tot_hol = webnotes.conn.sql("""select count(*) from `tabHoliday` h1, `tabHoliday List` h2
where h1.parent = h2.name and h1.holiday_date between %s and %s
and ifnull(h2.is_default,0) = 1 and h2.fiscal_year = %s""",
(self.doc.from_date, self.doc.to_date, self.doc.fiscal_year))
return tot_hol and flt(tot_hol[0][0]) or 0
def get_total_leave_days(self):
"""Calculates total leave days based on input and holidays"""
ret = {'total_leave_days' : 0.5}
if not self.doc.half_day:
tot_days = date_diff(self.doc.to_date, self.doc.from_date) + 1
holidays = self.get_holidays()
ret = {
'total_leave_days' : flt(tot_days)-flt(holidays)
}
return ret
def validate_to_date(self):
if self.doc.from_date and self.doc.to_date and \
(getdate(self.doc.to_date) < getdate(self.doc.from_date)):
msgprint("To date cannot be before from date")
raise Exception
def validate_balance_leaves(self):
if self.doc.from_date and self.doc.to_date:
self.doc.total_leave_days = self.get_total_leave_days()["total_leave_days"]
if self.doc.total_leave_days == 0:
msgprint(_("Hurray! The day(s) on which you are applying for leave \
coincide with holiday(s). You need not apply for leave."),
raise_exception=1)
if not is_lwp(self.doc.leave_type):
self.doc.leave_balance = get_leave_balance(self.doc.employee,
self.doc.leave_type, self.doc.fiscal_year)["leave_balance"]
if self.doc.status != "Rejected" \
and self.doc.leave_balance - self.doc.total_leave_days < 0:
#check if this leave type allow the remaining balance to be in negative. If yes then warn the user and continue to save else warn the user and don't save.
msgprint("There is not enough leave balance for Leave Type: %s" % \
(self.doc.leave_type,),
raise_exception=not(webnotes.conn.get_value("Leave Type", self.doc.leave_type,"allow_negative") or None))
def validate_leave_overlap(self):
if not self.doc.name:
self.doc.name = "New Leave Application"
for d in webnotes.conn.sql("""select name, leave_type, posting_date,
from_date, to_date
from `tabLeave Application`
where
employee = %(employee)s
and docstatus < 2
and status in ("Open", "Approved")
and (from_date between %(from_date)s and %(to_date)s
or to_date between %(from_date)s and %(to_date)s
or %(from_date)s between from_date and to_date)
and name != %(name)s""", self.doc.fields, as_dict = 1):
msgprint("Employee : %s has already applied for %s between %s and %s on %s. Please refer Leave Application : <a href=\"#Form/Leave Application/%s\">%s</a>" % (self.doc.employee, cstr(d['leave_type']), formatdate(d['from_date']), formatdate(d['to_date']), formatdate(d['posting_date']), d['name'], d['name']), raise_exception = OverlapError)
def validate_max_days(self):
max_days = webnotes.conn.sql("select max_days_allowed from `tabLeave Type` where name = '%s'" %(self.doc.leave_type))
max_days = max_days and flt(max_days[0][0]) or 0
if max_days and self.doc.total_leave_days > max_days:
msgprint("Sorry ! You cannot apply for %s for more than %s days" % (self.doc.leave_type, max_days))
raise Exception
def validate_leave_approver(self):
employee = webnotes.bean("Employee", self.doc.employee)
leave_approvers = [l.leave_approver for l in
employee.doclist.get({"parentfield": "employee_leave_approvers"})]
if len(leave_approvers) and self.doc.leave_approver not in leave_approvers:
msgprint(("[" + _("For Employee") + ' "' + self.doc.employee + '"] '
+ _("Leave Approver can be one of") + ": "
+ comma_or(leave_approvers)), raise_exception=InvalidLeaveApproverError)
elif self.doc.leave_approver and not webnotes.conn.sql("""select name from `tabUserRole`
where parent=%s and role='Leave Approver'""", self.doc.leave_approver):
msgprint(get_fullname(self.doc.leave_approver) + ": " \
+ _("does not have role 'Leave Approver'"), raise_exception=InvalidLeaveApproverError)
def notify_employee(self, status):
employee = webnotes.doc("Employee", self.doc.employee)
if not employee.user_id:
return
def _get_message(url=False):
if url:
name = get_url_to_form(self.doc.doctype, self.doc.name)
else:
name = self.doc.name
return (_("Leave Application") + ": %s - %s") % (name, _(status))
self.notify({
# for post in messages
"message": _get_message(url=True),
"message_to": employee.user_id,
"subject": _get_message(),
})
def notify_leave_approver(self):
employee = webnotes.doc("Employee", self.doc.employee)
def _get_message(url=False):
name = self.doc.name
employee_name = cstr(employee.employee_name)
if url:
name = get_url_to_form(self.doc.doctype, self.doc.name)
employee_name = get_url_to_form("Employee", self.doc.employee, label=employee_name)
return (_("New Leave Application") + ": %s - " + _("Employee") + ": %s") % (name, employee_name)
self.notify({
# for post in messages
"message": _get_message(url=True),
"message_to": self.doc.leave_approver,
# for email
"subject": _get_message()
})
def notify(self, args):
args = webnotes._dict(args)
from core.page.messages.messages import post
post({"txt": args.message, "contact": args.message_to, "subject": args.subject,
"notify": cint(self.doc.follow_via_email)})
@webnotes.whitelist()
def get_leave_balance(employee, leave_type, fiscal_year):
    """Returns {'leave_balance': allocated - approved} for the employee,
    leave type and fiscal year."""
    allocated_rows = webnotes.conn.sql("""select total_leaves_allocated
        from `tabLeave Allocation` where employee = %s and leave_type = %s
        and fiscal_year = %s and docstatus = 1""", (employee,
        leave_type, fiscal_year))
    allocated = allocated_rows and flt(allocated_rows[0][0]) or 0

    applied_rows = webnotes.conn.sql("""select SUM(total_leave_days)
        from `tabLeave Application`
        where employee = %s and leave_type = %s and fiscal_year = %s
        and status="Approved" and docstatus = 1""", (employee, leave_type, fiscal_year))
    applied = applied_rows and flt(applied_rows[0][0]) or 0

    return {'leave_balance': allocated - applied}
def is_lwp(leave_type):
    """Returns 1 if the given leave type is Leave Without Pay, else 0."""
    result = webnotes.conn.sql("select is_lwp from `tabLeave Type` where name = %s",
        leave_type)
    if not result:
        return 0
    return cint(result[0][0]) or 0
@webnotes.whitelist()
def get_events(start, end):
    """Returns calendar events between start and end: leave applications,
    block dates and holidays for the session user's employee/company."""
    events = []

    employee = webnotes.conn.get_default("employee", webnotes.session.user)
    company = webnotes.conn.get_default("company", webnotes.session.user)

    from webnotes.widgets.reportview import build_match_conditions
    match_conditions = build_match_conditions("Leave Application")

    # show department leaves for employee
    if "Employee" in webnotes.get_roles():
        add_department_leaves(events, start, end, employee, company)

    add_leaves(events, start, end, employee, company, match_conditions)

    add_block_dates(events, start, end, employee, company)
    add_holidays(events, start, end, employee, company)

    return events
def add_department_leaves(events, start, end, employee, company):
    """Appends leave events for everyone in the employee's department."""
    department = webnotes.conn.get_value("Employee", employee, "department")

    if not department:
        return

    # department leaves
    department_employees = webnotes.conn.sql_list("""select name from tabEmployee where department=%s
        and company=%s""", (department, company))

    # NOTE(review): employee names are interpolated into the SQL condition
    # below; values come from the database, but parameterizing would be safer.
    match_conditions = "employee in (\"%s\")" % '", "'.join(department_employees)
    add_leaves(events, start, end, employee, company, match_conditions=match_conditions)
def add_leaves(events, start, end, employee, company, match_conditions=None):
    """Appends leave applications overlapping [start, end] to events,
    optionally narrowed by extra SQL in match_conditions."""
    query = """select name, from_date, to_date, employee_name, half_day,
        status, employee, docstatus
        from `tabLeave Application` where
        (from_date between %s and %s or to_date between %s and %s)
        and docstatus < 2
        and status!="Rejected" """
    if match_conditions:
        query += " and " + match_conditions

    for d in webnotes.conn.sql(query, (start, end, start, end), as_dict=True):
        e = {
            "name": d.name,
            "doctype": "Leave Application",
            "from_date": d.from_date,
            "to_date": d.to_date,
            "status": d.status,
            "title": cstr(d.employee_name) + \
                (d.half_day and _(" (Half Day)") or ""),
            "docstatus": d.docstatus
        }
        # Skip duplicates: the same leave may come in via both the user's own
        # and the department query.
        if e not in events:
            events.append(e)
def add_block_dates(events, start, end, employee, company):
    """Appends the leave-blocked dates applicable to the employee as events."""
    from hr.doctype.leave_block_list.leave_block_list import get_applicable_block_dates

    block_dates = get_applicable_block_dates(start, end, employee, company, all_lists=True)

    # Block dates have no document name of their own, so synthesize one
    # from their position in the list.
    for index, block_date in enumerate(block_dates):
        events.append({
            "doctype": "Leave Block List Date",
            "from_date": block_date.block_date,
            "title": _("Leave Blocked") + ": " + block_date.reason,
            "name": "_" + str(index),
        })
def add_holidays(events, start, end, employee, company):
    """Appends the employee's holidays between start and end as events."""
    applicable_holiday_list = webnotes.conn.get_value("Employee", employee, "holiday_list")
    if not applicable_holiday_list:
        return

    holidays = webnotes.conn.sql("""select name, holiday_date, description
        from `tabHoliday` where parent=%s and holiday_date between %s and %s""",
        (applicable_holiday_list, start, end), as_dict=True)

    for holiday in holidays:
        events.append({
            "doctype": "Holiday",
            "from_date": holiday.holiday_date,
            "title": _("Holiday") + ": " + cstr(holiday.description),
            "name": holiday.name
        })
@webnotes.whitelist()
def query_for_permitted_employees(doctype, txt, searchfield, start, page_len, filters):
    """Link-field search for Employee, limited to records the session user
    may act on: for Leave Approvers, employees naming them as approver (or
    with no approver list) plus themselves; otherwise standard match
    conditions apply."""
    txt = "%" + cstr(txt) + "%"

    if "Leave Approver" in webnotes.user.get_roles():
        condition = """and (exists(select ela.name from `tabEmployee Leave Approver` ela
            where ela.parent=`tabEmployee`.name and ela.leave_approver= "%s") or
            not exists(select ela.name from `tabEmployee Leave Approver` ela
            where ela.parent=`tabEmployee`.name)
            or user_id = "%s")""" % (webnotes.session.user, webnotes.session.user)
    else:
        from webnotes.widgets.reportview import build_match_conditions
        condition = build_match_conditions("Employee")
        condition = ("and " + condition) if condition else ""

    # NOTE(review): searchfield, the session user and the condition are
    # interpolated into the SQL string (only txt/start/page_len are
    # parameterized) — confirm these values are server-controlled.
    return webnotes.conn.sql("""select name, employee_name from `tabEmployee`
        where status = 'Active' and docstatus < 2 and
        (`%s` like %s or employee_name like %s) %s
        order by
        case when name like %s then 0 else 1 end,
        case when employee_name like %s then 0 else 1 end,
        name limit %s, %s""" % tuple([searchfield] + ["%s"]*2 + [condition] + ["%s"]*4),
        (txt, txt, txt, txt, start, page_len))
| agpl-3.0 |
alfredoavanzosc/odoomrp-wip-1 | sale_documents_comments/models/stock.py | 2 | 2266 | # -*- encoding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import api, models, fields
class StockPicking(models.Model):
    """Extends stock.picking with internal sale comments propagated from the
    partner and the originating sale order, and onward to invoices."""
    _inherit = 'stock.picking'

    sale_comment = fields.Text(string='Internal comments')
    sale_propagated_comment = fields.Text(
        string='Propagated internal comments')

    @api.one
    @api.onchange('partner_id')
    def onchange_partner_id(self):
        # Prefill both comment fields from the selected partner.
        picking_com, picking_pcom = self.partner_id._get_picking_comments()
        self.sale_comment = picking_com
        self.sale_propagated_comment = picking_pcom

    @api.model
    def create(self, values):
        # Append the partner's picking comments (and the source sale order's
        # propagated comment, when origin is set) to any incoming values.
        partner_id = values.get('partner_id', False)
        origin = values.get('origin', False)
        comment = values.get('sale_comment', '') or ''
        pcomment = values.get('sale_propagated_comment', '') or ''
        if partner_id:
            if origin:
                sale_obj = self.env['sale.order']
                sale = sale_obj.search([('name', '=', origin)], limit=1)
                pcomment += '\n%s' % (sale.propagated_comment or '')
            partner = self.env['res.partner'].browse(partner_id)
            picking_com, picking_pcom = partner._get_picking_comments()
            comment += '\n%s' % (picking_com or '')
            pcomment += '\n%s' % (picking_pcom or '')
            values.update({'sale_comment': comment,
                           'sale_propagated_comment': pcomment})
        return super(StockPicking, self).create(values)

    @api.model
    def _create_invoice_from_picking(self, picking, values):
        # Carry the picking's propagated comment (plus the partner's invoice
        # comments) onto the invoice being created.
        sale_comment = values.get('sale_comment', '')
        sale_comment += (
            '\n%s' % (picking.sale_propagated_comment or ''))
        partner_id = values.get('partner_id')
        if partner_id:
            partner = self.env['res.partner'].browse(partner_id)
            sale_comment += '\n%s' % (partner._get_invoice_comments() or '')
        values['sale_comment'] = sale_comment
        return super(StockPicking, self)._create_invoice_from_picking(
            picking, values)
| agpl-3.0 |
armersong/zato | code/zato-common/src/zato/common/crypto.py | 6 | 2744 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2010 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
import logging
from base64 import b64decode, b64encode
# PyCrypto
from Crypto.PublicKey import RSA as pycrypto_rsa
# rsa
import rsa
logger = logging.getLogger(__name__)
class CryptoManager(object):
    """ Responsible for management of the server's crypto material.

    Keys may be given either as file paths (priv_key_location /
    pub_key_location) or as inline PEM material (priv_key / pub_key).
    """
    def __init__(self, priv_key_location=None, priv_key=None, pub_key_location=None, pub_key=None):
        self.priv_key_location = priv_key_location
        self.priv_key = priv_key
        self.pub_key_location = pub_key_location
        self.pub_key = pub_key

    def _pkcs1_from_pkcs8(self, pkcs8):
        """ Private keys saved by CLI are in PKCS#8 but the rsa module needs PKCS#1.
        Note that PKCS#8 deals with private keys only (https://tools.ietf.org/html/rfc5208).
        """
        key = pycrypto_rsa.importKey(pkcs8)
        return key.exportKey()

    def load_keys(self):
        """ Loads the public key, either directly from a PEM file or derived
        from the private key (given as a path or inline PEM material).
        """
        if self.pub_key_location:
            pkcs1 = open(self.pub_key_location).read()
            self.pub_key = rsa.key.PublicKey.load_pkcs1_openssl_pem(pkcs1)
        else:
            if self.priv_key_location:
                pkcs8 = open(self.priv_key_location).read()
                pkcs1 = self._pkcs1_from_pkcs8(pkcs8)
            elif self.priv_key:
                # BUG FIX: the conversion was previously applied twice here;
                # the second pass was a no-op because _pkcs1_from_pkcs8
                # already returns PKCS#1 (importKey accepts both formats).
                pkcs1 = self._pkcs1_from_pkcs8(self.priv_key)
            # NOTE(review): if neither priv_key_location nor priv_key is set,
            # pkcs1 is unbound here and a NameError is raised — confirm
            # callers always provide one of them.
            self.priv_key = rsa.key.PrivateKey.load_pkcs1(pkcs1)
            self.pub_key = rsa.key.PublicKey(self.priv_key.n, self.priv_key.e)

    def decrypt(self, data, hexlified=True):
        """ Decrypts data using the private config key. Padding used defaults
        to PKCS#1. hexlified defaults to True and indicates whether the data
        should be decoded before being decrypted. (Despite the parameter's
        name, the encoding actually used is BASE64, not hex; the name is
        kept for backward compatibility.)
        """
        if hexlified:
            data = b64decode(data)

        return rsa.decrypt(data, self.priv_key)

    def encrypt(self, data, b64=True):
        """ Encrypts data using the public config key. Padding used defaults
        to PKCS#1. b64 defaults to True and indicates whether the data
        should be BASE64-encoded after being encrypted.
        """
        encrypted = rsa.encrypt(data, self.pub_key)
        if b64:
            return b64encode(encrypted)

        return encrypted

    def reset(self):
        """ Sets all the keys to None.
        """
        self.priv_key_location = None
        self.pub_key_location = None
        self.priv_key = None
        self.pub_key = None
| gpl-3.0 |
j-griffith/cinder | cinder/message/message_field.py | 2 | 4086 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Message Resource, Action, Detail and user visible message.
Use Resource, Action and Detail's combination to indicate the Event
in the format of:
EVENT: VOLUME_RESOURCE_ACTION_DETAIL
Also, use exception-to-detail mapping to decrease the workload of
classifying event in cinder's task code.
"""
from cinder.i18n import _
class Resource(object):
    """Resource types a user-visible message can be attached to."""
    VOLUME = 'VOLUME'
class Action(object):
    """(action id, translated description) pairs for volume operations.
    The zero-padded id string is persisted with the message record;
    translate_action() maps it back to the human readable description.
    """
    SCHEDULE_ALLOCATE_VOLUME = ('001', _('schedule allocate volume'))
    ATTACH_VOLUME = ('002', _('attach volume'))
    COPY_VOLUME_TO_IMAGE = ('003', _('copy volume to image'))
    UPDATE_ATTACHMENT = ('004', _('update attachment'))
    COPY_IMAGE_TO_VOLUME = ('005', _('copy image to volume'))
    UNMANAGE_VOLUME = ('006', _('unmanage volume'))
    # Every known action, used for id -> description lookup.
    ALL = (SCHEDULE_ALLOCATE_VOLUME,
           ATTACH_VOLUME,
           COPY_VOLUME_TO_IMAGE,
           UPDATE_ATTACHMENT,
           COPY_IMAGE_TO_VOLUME,
           UNMANAGE_VOLUME
           )
class Detail(object):
    """(detail id, translated message) pairs describing why an action failed.
    The id is persisted with the message; EXCEPTION_DETAIL_MAPPINGS lets
    task code derive a detail id directly from a raised exception's class
    name (see translate_detail_id()).
    """
    UNKNOWN_ERROR = ('001', _('An unknown error occurred.'))
    DRIVER_NOT_INITIALIZED = ('002',
                              _('Driver is not initialized at present.'))
    NO_BACKEND_AVAILABLE = ('003',
                            _('Could not find any available '
                              'weighted backend.'))
    FAILED_TO_UPLOAD_VOLUME = ('004',
                               _("Failed to upload volume to image "
                                 "at backend."))
    VOLUME_ATTACH_MODE_INVALID = ('005',
                                  _("Volume's attach mode is invalid."))
    QUOTA_EXCEED = ('006',
                    _("Not enough quota resource for operation."))
    NOT_ENOUGH_SPACE_FOR_IMAGE = ('007',
                                  _("Image used for creating volume exceeds "
                                    "available space."))
    UNMANAGE_ENC_NOT_SUPPORTED = (
        '008',
        _("Unmanaging encrypted volumes is not supported."))
    # Every known detail, used for id -> message lookup.
    ALL = (UNKNOWN_ERROR,
           DRIVER_NOT_INITIALIZED,
           NO_BACKEND_AVAILABLE,
           FAILED_TO_UPLOAD_VOLUME,
           VOLUME_ATTACH_MODE_INVALID,
           QUOTA_EXCEED,
           NOT_ENOUGH_SPACE_FOR_IMAGE,
           UNMANAGE_ENC_NOT_SUPPORTED,
           )
    # Exception and detail mappings: detail tuple -> exception class names
    # that should be reported with that detail id.
    EXCEPTION_DETAIL_MAPPINGS = {
        DRIVER_NOT_INITIALIZED: ['DriverNotInitialized'],
        NO_BACKEND_AVAILABLE: ['NoValidBackend'],
        VOLUME_ATTACH_MODE_INVALID: ['InvalidVolumeAttachMode'],
        QUOTA_EXCEED: ['ImageLimitExceeded',
                       'BackupLimitExceeded',
                       'SnapshotLimitExceeded'],
        NOT_ENOUGH_SPACE_FOR_IMAGE: ['ImageTooBig'],
        UNMANAGE_ENC_NOT_SUPPORTED: ['UnmanageEncVolNotSupported'],
    }
def translate_action(action_id):
    """Return the human readable description for an action id.

    Falls back to 'unknown action' when the id is not registered.
    """
    match = None
    for identifier, message in Action.ALL:
        if identifier == action_id:
            match = message
            break
    return match or 'unknown action'
def translate_detail(detail_id):
    """Return the human readable message for a detail id.

    Falls back to the UNKNOWN_ERROR message when the id is not registered.
    """
    match = None
    for identifier, message in Detail.ALL:
        if identifier == detail_id:
            match = message
            break
    return match or Detail.UNKNOWN_ERROR[1]
def translate_detail_id(exception, detail):
    """Return a detail id for an exception (preferred) or a Detail tuple.

    The exception's class name is matched against
    Detail.EXCEPTION_DETAIL_MAPPINGS first; otherwise a valid Detail tuple
    yields its own id, and anything else maps to UNKNOWN_ERROR.
    """
    if isinstance(exception, Exception):
        exc_name = exception.__class__.__name__
        for mapped_detail, exception_names in (
                Detail.EXCEPTION_DETAIL_MAPPINGS.items()):
            if exc_name in exception_names:
                return mapped_detail[0]
    if detail in Detail.ALL:
        return detail[0]
    return Detail.UNKNOWN_ERROR[0]
| apache-2.0 |
daviswr/ZenPacks.daviswr.ZFS | setup.py | 1 | 2434 | ################################
# These variables are overwritten by Zenoss when the ZenPack is exported
# or saved. Do not modify them directly here.
# NB: PACKAGES is deprecated
# ZenPack identity and packaging metadata (managed by Zenoss above the
# STOP_REPLACEMENTS marker).
NAME = "ZenPacks.daviswr.ZFS"
VERSION = "0.8.0-dev"
AUTHOR = "Wes Davis"
LICENSE = "MIT"
NAMESPACE_PACKAGES = ['ZenPacks', 'ZenPacks.daviswr']
PACKAGES = ['ZenPacks', 'ZenPacks.daviswr', 'ZenPacks.daviswr.ZFS']
INSTALL_REQUIRES = ['ZenPacks.zenoss.ZenPackLib']
COMPAT_ZENOSS_VERS = ">=4.2.5"
PREV_ZENPACK_NAME = ""
# STOP_REPLACEMENTS
################################
# Zenoss will not overwrite any changes you make below here.
from setuptools import setup, find_packages
setup(
    # This ZenPack metadata should usually be edited with the Zenoss
    # ZenPack edit page. Whenever the edit page is submitted it will
    # overwrite the values below (the ones it knows about) with new values.
    name=NAME,
    version=VERSION,
    author=AUTHOR,
    license=LICENSE,
    # This is the version spec which indicates what versions of Zenoss
    # this ZenPack is compatible with
    compatZenossVers=COMPAT_ZENOSS_VERS,
    # previousZenPackName is a facility for telling Zenoss that the name
    # of this ZenPack has changed. If no ZenPack with the current name is
    # installed then a zenpack of this name if installed will be upgraded.
    prevZenPackName=PREV_ZENPACK_NAME,
    # Indicate to setuptools which namespace packages the zenpack
    # participates in
    namespace_packages=NAMESPACE_PACKAGES,
    # Tell setuptools what packages this zenpack provides.
    packages=find_packages(),
    # Tell setuptools to figure out for itself which files to include
    # in the binary egg when it is built.
    include_package_data=True,
    # Indicate dependencies on other python modules or ZenPacks. This line
    # is modified by zenoss when the ZenPack edit page is submitted. Zenoss
    # tries to put add/delete the names it manages at the beginning of this
    # list, so any manual additions should be added to the end. Things will
    # go poorly if this line is broken into multiple lines or modified
    # too dramatically.
    install_requires=INSTALL_REQUIRES,
    # Every ZenPack egg must define exactly one zenoss.zenpacks entry point
    # of this form.
    entry_points={
        'zenoss.zenpacks': '%s = %s' % (NAME, NAME),
    },
    # All ZenPack eggs must be installed in unzipped form.
    zip_safe=False,
)
| mit |
grap/OCB | addons/document/report/document_report.py | 76 | 4223 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp import tools
class report_document_user(osv.osv):
    """Read-only reporting model backed by the SQL view created in init();
    aggregates ir_attachment rows per year/month/user/directory.
    """
    _name = "report.document.user"
    _description = "Files details by Users"
    # _auto=False: no table is created; the model maps onto the SQL view.
    _auto = False
    _columns = {
        'name': fields.char('Year', size=64,readonly=True),
        'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
                                  ('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month',readonly=True),
        'user_id': fields.many2one('res.users', 'Owner', readonly=True),
        'user': fields.related('user_id', 'name', type='char', size=64, readonly=True),
        'directory': fields.char('Directory',size=64,readonly=True),
        'datas_fname': fields.char('File Name',size=64,readonly=True),
        'create_date': fields.datetime('Date Created', readonly=True),
        'change_date': fields.datetime('Modified Date', readonly=True),
        'file_size': fields.integer('File Size', readonly=True),
        'nbr':fields.integer('# of Files', readonly=True),
        'type':fields.char('Directory Type',size=64,readonly=True),
    }
    def init(self, cr):
        """(Re)create the backing SQL view grouping attachments by
        creation year/month, owner and parent directory."""
        tools.drop_view_if_exists(cr, 'report_document_user')
        cr.execute("""
            CREATE OR REPLACE VIEW report_document_user as (
                 SELECT
                     min(f.id) as id,
                     to_char(f.create_date, 'YYYY') as name,
                     to_char(f.create_date, 'MM') as month,
                     f.user_id as user_id,
                     count(*) as nbr,
                     d.name as directory,
                     f.datas_fname as datas_fname,
                     f.create_date as create_date,
                     f.file_size as file_size,
                     min(d.type) as type,
                     f.write_date as change_date
                 FROM ir_attachment f
                     left join document_directory d on (f.parent_id=d.id and d.name<>'')
                 group by to_char(f.create_date, 'YYYY'), to_char(f.create_date, 'MM'),d.name,f.parent_id,d.type,f.create_date,f.user_id,f.file_size,d.type,f.write_date,f.datas_fname
             )
        """)
class report_document_file(osv.osv):
    """Read-only reporting model backed by a SQL view; aggregates
    attachment counts and sizes per creation month.
    """
    _name = "report.document.file"
    _description = "Files details by Directory"
    # _auto=False: no table is created; the model maps onto the SQL view.
    _auto = False
    _columns = {
        'file_size': fields.integer('File Size', readonly=True),
        'nbr':fields.integer('# of Files', readonly=True),
        'month': fields.char('Month', size=24,readonly=True),
    }
    _order = "month"
    def init(self, cr):
        """(Re)create the backing SQL view grouping attachments by the
        month component of their creation date."""
        tools.drop_view_if_exists(cr, 'report_document_file')
        cr.execute("""
            create or replace view report_document_file as (
                select min(f.id) as id,
                       count(*) as nbr,
                       min(EXTRACT(MONTH FROM f.create_date)||'-'||to_char(f.create_date,'Month')) as month,
                       sum(f.file_size) as file_size
                from ir_attachment f
                group by EXTRACT(MONTH FROM f.create_date)
            )
        """)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
aleixrodri98/Un-proyecto-diferente | Series/obtenirtorrent.py | 1 | 4983 | from bs4 import BeautifulSoup
from collections import Counter
import urllib
import urllib2
import sys
import re
import subprocess
from subprocess import check_output
import difflib
import requests
def contestarVeu(missatge):
    """Speak *missatge* aloud by launching the background TTS helper
    script (fire-and-forget; the child process is not waited on)."""
    command = ["python",
               "/home/pi/AleixDomo/Musica/pyvonabackground.py",
               missatge]
    subprocess.Popen(command)
# Main script flow: search newpct1 for the spoken movie name, scrape a
# streaming link, play it with omxplayer (resuming from a saved position),
# and fall back to a peerflix torrent stream if everything else fails.
missatge = sys.argv[1]
respuesta = []
listalinks = []
listaservers = []
listacompletaservers = []
# Saved playback positions, one "title/position" entry per line.
Dades = [line.rstrip('\n') for line in open('/home/pi/AleixDomo/txt/pelis.txt', 'r')]
url = 'http://www.newpct1.com/buscar'
values = {'q' : missatge}
# Request the search page (POST body built from the query values).
data = urllib.urlencode(values)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
the_page = response.read()
soup = BeautifulSoup("".join(the_page))
urlfinallist = soup.findAll('a', href=re.compile('^http://www.newpct1.com/pelicula/'))
# Search: collect candidate movie slugs from the result links.
for i in range(2,len(urlfinallist)):
    respuesta.append(str(urlfinallist[i]).split('/')[4])
respuesta = respuesta[:3]
# Keep the candidate most similar to the spoken query.
respuesta = sorted(respuesta, key=lambda x: difflib.SequenceMatcher(None, x, missatge).ratio(), reverse=True)
pelicula = respuesta[0]
peliculacontestar = pelicula.replace ("-", " ")
# Look up a previously saved playback position for this title.
for i in Dades:
    i = i.split("/")
    if peliculacontestar == i[0]:
        try:
            if i[1]:
                minutsave = int(i[1])
        except:
            minutsave = None
# Show the cover art and announce the movie being played.
subprocess.Popen(["sudo", "python", "/home/pi/AleixDomo/Series/PeliActual/caratula.py", peliculacontestar])
peliculacontestar2 = "Poniendo: " + peliculacontestar
contestarVeu(peliculacontestar2)
url = "http://www.newpct1.com/descarga-torrent/pelicula/" + pelicula + "/"
pageFile = urllib.urlopen(url)
pageHtml = pageFile.read()
pageFile.close()
soup = BeautifulSoup("".join(pageHtml))
# Scrape the streaming links section, keeping only the supported hosts.
urlfinallist = soup.findAll('div',attrs={'id':'tab3'});
for i in urlfinallist:
    ola = i.findAll('a', href=True)
    for i in ola:
        link = str(i).split('href="')[1].split('"')[0]
        try:
            server = str(i).split('://www.')[1].split('.')[0]
            if server == "videoweed":
                listalinks.append(link)
                listaservers.append(server)
        except:
            try:
                server = str(i).split('://')[1].split('.')[0]
                if server == "videomega" or server == "streamcloud":
                    listalinks.append(link)
                    listaservers.append(server)
            except:
                pass
# Pair every server name with its link.
for x in xrange(len(listaservers)):
    listacompletaservers.append([])
for i in xrange(len(listaservers)):
    listacompletaservers[i].append(listaservers[i])
    listacompletaservers[i].append(listalinks[i])
# Preferred host order: streamcloud first, then videoweed, then videomega.
SORT_ORDER = {"streamcloud": 0, "videoweed": 1, "videomega": 2}
listacompletaservers.sort(key=lambda val: SORT_ORDER[val[0]])
# Probe the candidates in order and keep the first link that looks alive.
for i in xrange(len(listacompletaservers)):
    if listacompletaservers[i][0] == "streamcloud":
        pageFile = urllib.urlopen(listacompletaservers[i][1])
        pageHtml = pageFile.read()
        pageFile.close()
        soupa = BeautifulSoup("".join(pageHtml))
        # An <input> element on the page indicates the video still exists.
        ola = soupa.find('input')
        if ola != None:
            linkbo = listacompletaservers[i][1]
            break
    elif listacompletaservers[i][0] == "videoweed":
        pageFile = urllib.urlopen(listacompletaservers[i][1])
        pageHtml = pageFile.read()
        pageFile.close()
        soupa = BeautifulSoup("".join(pageHtml))
        # An <h3> element indicates a "video removed" page - skip it.
        ola = soupa.find('h3')
        if ola == None:
            linkbo = listacompletaservers[i][1]
            break
    elif listacompletaservers[i][0] == "videomega":
        linkbo = listacompletaservers[i][1]
        break
try:
    # NOTE(review): linkbo/minutsave may be unbound here if no link or no
    # saved position was found; the bare except below absorbs that too.
    if linkbo:
        # Resolve the direct media URL and play it with omxplayer.
        out = check_output(["youtube-dl", "-g", linkbo])
        out = out.rstrip('\n')
        try:
            # Presumably converts a saved microsecond offset to minutes
            # for omxplayer's -l HH:MM:SS start position - TODO confirm.
            minutsave = minutsave/1000000/60
            hora = "00:" + str(minutsave) + ":00"
            process = subprocess.Popen(["omxplayer", "-b", "-g","-l" ,hora, "--user-agent" ,"Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36", out])
        except:
            process = subprocess.Popen(["omxplayer", "-b ", "-g", "--user-agent" ,"Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36", out])
        process.wait()
        # 'SubmitEOS' in the log means playback reached the end - nothing
        # to save. Otherwise persist the last known position for resuming.
        if 'SubmitEOS' in open('/home/pi/omxplayer.log').read():
            pass
        else:
            for line in reversed(open("/home/pi/omxplayer.log").readlines()):
                line = line.rstrip()
                if "cur:" in line:
                    linia = line.split('cur:')[1].split(',')[0]
                    break
            f2 = open("/home/pi/AleixDomo/txt/pelis.txt",'a')
            f2.write(peliculacontestar + "/" + linia + "\n")
            f2.close()
except:
    # Streaming failed entirely - fall back to a torrent stream.
    try:
        params = {'limit': 1 , 'keywords': missatge}
        r = requests.get('http://pelismag.net/api', params=params).json()
        print r
        i = r[0]
        try:
            # Prefer the 1080p magnet, falling back to 720p.
            if i['magnets']['M1080']['magnet']:
                subprocess.Popen(["peerflix", i['magnets']['M1080']['magnet'], "-o", "--", "-b"])
            else:
                subprocess.Popen(["peerflix", i['magnets']['M720']['magnet'], "-o", "--", "-b"])
        except:
            try:
                subprocess.Popen(["peerflix", i['magnets']['M720']['magnet'], "-o", "--", "-b"])
            except:
                contestarVeu("La pelicula seleccionada no existe o no esta disponible")
    except:
        contestarVeu("La pelicula seleccionada no existe o no esta disponible")
# Clear the "listening" flag so the voice loop resumes.
file = open("/home/pi/AleixDomo/txt/escoltar.txt", "w")
file.write("0")
file.close()
| gpl-3.0 |
kayframework/kay-framework | kay/lib/babel/messages/frontend.py | 7 | 49756 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2008 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Frontends for the message extraction functionality."""
from ConfigParser import RawConfigParser
from datetime import datetime
from distutils import log
from distutils.cmd import Command
from distutils.errors import DistutilsOptionError, DistutilsSetupError
from locale import getpreferredencoding
import logging
from optparse import OptionParser
import os
import re
import shutil
from StringIO import StringIO
import sys
import tempfile
from babel import __version__ as VERSION
from babel import Locale, localedata
from babel.core import UnknownLocaleError
from babel.messages.catalog import Catalog
from babel.messages.extract import extract_from_dir, DEFAULT_KEYWORDS, \
DEFAULT_MAPPING
from babel.messages.mofile import write_mo
from babel.messages.pofile import read_po, write_po
from babel.messages.plurals import PLURALS
from babel.util import odict, LOCALTZ
__all__ = ['CommandLineInterface', 'compile_catalog', 'extract_messages',
'init_catalog', 'check_message_extractors', 'update_catalog']
__docformat__ = 'restructuredtext en'
class compile_catalog(Command):
    """Catalog compilation command for use in ``setup.py`` scripts.
    If correctly installed, this command is available to Setuptools-using
    setup scripts automatically. For projects using plain old ``distutils``,
    the command needs to be registered explicitly in ``setup.py``::
        from babel.messages.frontend import compile_catalog
        setup(
            ...
            cmdclass = {'compile_catalog': compile_catalog}
        )
    :since: version 0.9
    :see: `Integrating new distutils commands <http://docs.python.org/dist/node32.html>`_
    :see: `setuptools <http://peak.telecommunity.com/DevCenter/setuptools>`_
    """
    description = 'compile message catalogs to binary MO files'
    user_options = [
        ('domain=', 'D',
         "domain of PO file (default 'messages')"),
        ('directory=', 'd',
         'path to base directory containing the catalogs'),
        ('input-file=', 'i',
         'name of the input file'),
        ('output-file=', 'o',
         "name of the output file (default "
         "'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),
        ('locale=', 'l',
         'locale of the catalog to compile'),
        ('use-fuzzy', 'f',
         'also include fuzzy translations'),
        ('statistics', None,
         'print statistics about translations')
    ]
    boolean_options = ['use-fuzzy', 'statistics']
    def initialize_options(self):
        """Set the default (unset) state for every command option."""
        self.domain = 'messages'
        self.directory = None
        self.input_file = None
        self.output_file = None
        self.locale = None
        self.use_fuzzy = False
        self.statistics = False
    def finalize_options(self):
        """Validate option combinations after the command line is parsed."""
        if not self.input_file and not self.directory:
            raise DistutilsOptionError('you must specify either the input file '
                                       'or the base directory')
        if not self.output_file and not self.directory:
            raise DistutilsOptionError('you must specify either the input file '
                                       'or the base directory')
    def run(self):
        """Collect the PO catalogs to compile and write out MO files."""
        po_files = []
        mo_files = []
        # Build parallel lists of (locale, po_path) and mo_path, either from
        # an explicit input file or by scanning the base directory layout.
        if not self.input_file:
            if self.locale:
                po_files.append((self.locale,
                                 os.path.join(self.directory, self.locale,
                                              'LC_MESSAGES',
                                              self.domain + '.po')))
                mo_files.append(os.path.join(self.directory, self.locale,
                                             'LC_MESSAGES',
                                             self.domain + '.mo'))
            else:
                for locale in os.listdir(self.directory):
                    po_file = os.path.join(self.directory, locale,
                                           'LC_MESSAGES', self.domain + '.po')
                    if os.path.exists(po_file):
                        po_files.append((locale, po_file))
                        mo_files.append(os.path.join(self.directory, locale,
                                                     'LC_MESSAGES',
                                                     self.domain + '.mo'))
        else:
            po_files.append((self.locale, self.input_file))
            if self.output_file:
                mo_files.append(self.output_file)
            else:
                mo_files.append(os.path.join(self.directory, self.locale,
                                             'LC_MESSAGES',
                                             self.domain + '.mo'))
        if not po_files:
            raise DistutilsOptionError('no message catalogs found')
        for idx, (locale, po_file) in enumerate(po_files):
            mo_file = mo_files[idx]
            infile = open(po_file, 'r')
            try:
                catalog = read_po(infile, locale)
            finally:
                infile.close()
            if self.statistics:
                # Skip the header entry (index 0) when counting messages.
                translated = 0
                for message in list(catalog)[1:]:
                    if message.string:
                        translated +=1
                percentage = 0
                if len(catalog):
                    percentage = translated * 100 // len(catalog)
                log.info('%d of %d messages (%d%%) translated in %r',
                         translated, len(catalog), percentage, po_file)
            if catalog.fuzzy and not self.use_fuzzy:
                log.warn('catalog %r is marked as fuzzy, skipping', po_file)
                continue
            for message, errors in catalog.check():
                for error in errors:
                    log.error('error: %s:%d: %s', po_file, message.lineno,
                              error)
            log.info('compiling catalog %r to %r', po_file, mo_file)
            outfile = open(mo_file, 'wb')
            try:
                write_mo(outfile, catalog, use_fuzzy=self.use_fuzzy)
            finally:
                outfile.close()
class extract_messages(Command):
    """Message extraction command for use in ``setup.py`` scripts.
    If correctly installed, this command is available to Setuptools-using
    setup scripts automatically. For projects using plain old ``distutils``,
    the command needs to be registered explicitly in ``setup.py``::
        from babel.messages.frontend import extract_messages
        setup(
            ...
            cmdclass = {'extract_messages': extract_messages}
        )
    :see: `Integrating new distutils commands <http://docs.python.org/dist/node32.html>`_
    :see: `setuptools <http://peak.telecommunity.com/DevCenter/setuptools>`_
    """
    description = 'extract localizable strings from the project code'
    user_options = [
        ('charset=', None,
         'charset to use in the output file'),
        ('keywords=', 'k',
         'space-separated list of keywords to look for in addition to the '
         'defaults'),
        ('no-default-keywords', None,
         'do not include the default keywords'),
        ('mapping-file=', 'F',
         'path to the mapping configuration file'),
        ('no-location', None,
         'do not include location comments with filename and line number'),
        ('omit-header', None,
         'do not include msgid "" entry in header'),
        ('output-file=', 'o',
         'name of the output file'),
        ('width=', 'w',
         'set output line width (default 76)'),
        ('no-wrap', None,
         'do not break long message lines, longer than the output line width, '
         'into several lines'),
        ('sort-output', None,
         'generate sorted output (default False)'),
        ('sort-by-file', None,
         'sort output by file location (default False)'),
        ('msgid-bugs-address=', None,
         'set report address for msgid'),
        ('copyright-holder=', None,
         'set copyright holder in output'),
        ('add-comments=', 'c',
         'place comment block with TAG (or those preceding keyword lines) in '
         'output file. Seperate multiple TAGs with commas(,)'),
        ('strip-comments', None,
         'strip the comment TAGs from the comments.'),
        ('input-dirs=', None,
         'directories that should be scanned for messages'),
    ]
    boolean_options = [
        'no-default-keywords', 'no-location', 'omit-header', 'no-wrap',
        'sort-output', 'sort-by-file', 'strip-comments'
    ]
    def initialize_options(self):
        """Set the default (unset) state for every command option."""
        self.charset = 'utf-8'
        self.keywords = ''
        # _keywords/_add_comments hold the parsed forms of the raw
        # string options above.
        self._keywords = DEFAULT_KEYWORDS.copy()
        self.no_default_keywords = False
        self.mapping_file = None
        self.no_location = False
        self.omit_header = False
        self.output_file = None
        self.input_dirs = None
        self.width = None
        self.no_wrap = False
        self.sort_output = False
        self.sort_by_file = False
        self.msgid_bugs_address = None
        self.copyright_holder = None
        self.add_comments = None
        self._add_comments = []
        self.strip_comments = False
    def finalize_options(self):
        """Validate option combinations and derive the parsed settings."""
        if self.no_default_keywords and not self.keywords:
            raise DistutilsOptionError('you must specify new keywords if you '
                                       'disable the default ones')
        if self.no_default_keywords:
            self._keywords = {}
        if self.keywords:
            self._keywords.update(parse_keywords(self.keywords.split()))
        if not self.output_file:
            raise DistutilsOptionError('no output file specified')
        if self.no_wrap and self.width:
            raise DistutilsOptionError("'--no-wrap' and '--width' are mutually "
                                       "exclusive")
        if not self.no_wrap and not self.width:
            self.width = 76
        elif self.width is not None:
            self.width = int(self.width)
        if self.sort_output and self.sort_by_file:
            raise DistutilsOptionError("'--sort-output' and '--sort-by-file' "
                                       "are mutually exclusive")
        if not self.input_dirs:
            # Default to the top-level package directories of the
            # distribution, de-duplicated via a dict.
            self.input_dirs = dict.fromkeys([k.split('.',1)[0]
                for k in self.distribution.packages
            ]).keys()
        if self.add_comments:
            self._add_comments = self.add_comments.split(',')
    def run(self):
        """Extract messages from the input dirs and write the POT file."""
        mappings = self._get_mappings()
        outfile = open(self.output_file, 'w')
        try:
            catalog = Catalog(project=self.distribution.get_name(),
                              version=self.distribution.get_version(),
                              msgid_bugs_address=self.msgid_bugs_address,
                              copyright_holder=self.copyright_holder,
                              charset=self.charset)
            for dirname, (method_map, options_map) in mappings.items():
                def callback(filename, method, options):
                    # Progress logging hook invoked per scanned file.
                    if method == 'ignore':
                        return
                    filepath = os.path.normpath(os.path.join(dirname, filename))
                    optstr = ''
                    if options:
                        optstr = ' (%s)' % ', '.join(['%s="%s"' % (k, v) for
                                                      k, v in options.items()])
                    log.info('extracting messages from %s%s', filepath, optstr)
                extracted = extract_from_dir(dirname, method_map, options_map,
                                             keywords=self._keywords,
                                             comment_tags=self._add_comments,
                                             callback=callback,
                                             strip_comment_tags=
                                                self.strip_comments)
                for filename, lineno, message, comments in extracted:
                    filepath = os.path.normpath(os.path.join(dirname, filename))
                    catalog.add(message, None, [(filepath, lineno)],
                                auto_comments=comments)
            log.info('writing PO template file to %s' % self.output_file)
            write_po(outfile, catalog, width=self.width,
                     no_location=self.no_location,
                     omit_header=self.omit_header,
                     sort_output=self.sort_output,
                     sort_by_file=self.sort_by_file)
        finally:
            outfile.close()
    def _get_mappings(self):
        """Return {dirname: (method_map, options_map)} from, in order of
        preference, the mapping file, the distribution's
        ``message_extractors`` keyword, or the built-in defaults."""
        mappings = {}
        if self.mapping_file:
            fileobj = open(self.mapping_file, 'U')
            try:
                method_map, options_map = parse_mapping(fileobj)
                for dirname in self.input_dirs:
                    mappings[dirname] = method_map, options_map
            finally:
                fileobj.close()
        elif getattr(self.distribution, 'message_extractors', None):
            message_extractors = self.distribution.message_extractors
            for dirname, mapping in message_extractors.items():
                if isinstance(mapping, basestring):
                    method_map, options_map = parse_mapping(StringIO(mapping))
                else:
                    method_map, options_map = [], {}
                    for pattern, method, options in mapping:
                        method_map.append((pattern, method))
                        options_map[pattern] = options or {}
                mappings[dirname] = method_map, options_map
        else:
            for dirname in self.input_dirs:
                mappings[dirname] = DEFAULT_MAPPING, {}
        return mappings
def check_message_extractors(dist, name, value):
    """Validate the ``message_extractors`` keyword argument to ``setup()``.
    :param dist: the distutils/setuptools ``Distribution`` object
    :param name: the name of the keyword argument (should always be
                 "message_extractors")
    :param value: the value of the keyword argument
    :raise `DistutilsSetupError`: if the value is not a dictionary
    :see: `Adding setup() arguments
          <http://peak.telecommunity.com/DevCenter/setuptools#adding-setup-arguments>`_
    """
    assert name == 'message_extractors'
    if isinstance(value, dict):
        return
    raise DistutilsSetupError('the value of the "message_extractors" '
                              'parameter must be a dictionary')
class init_catalog(Command):
    """New catalog initialization command for use in ``setup.py`` scripts.
    If correctly installed, this command is available to Setuptools-using
    setup scripts automatically. For projects using plain old ``distutils``,
    the command needs to be registered explicitly in ``setup.py``::
        from babel.messages.frontend import init_catalog
        setup(
            ...
            cmdclass = {'init_catalog': init_catalog}
        )
    :see: `Integrating new distutils commands <http://docs.python.org/dist/node32.html>`_
    :see: `setuptools <http://peak.telecommunity.com/DevCenter/setuptools>`_
    """
    description = 'create a new catalog based on a POT file'
    user_options = [
        ('domain=', 'D',
         "domain of PO file (default 'messages')"),
        ('input-file=', 'i',
         'name of the input file'),
        ('output-dir=', 'd',
         'path to output directory'),
        ('output-file=', 'o',
         "name of the output file (default "
         "'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),
        ('locale=', 'l',
         'locale for the new localized catalog'),
    ]
    def initialize_options(self):
        """Set the default (unset) state for every command option."""
        self.output_dir = None
        self.output_file = None
        self.input_file = None
        self.locale = None
        self.domain = 'messages'
    def finalize_options(self):
        """Validate options, derive the output path and create its
        directory if needed."""
        if not self.input_file:
            raise DistutilsOptionError('you must specify the input file')
        if not self.locale:
            raise DistutilsOptionError('you must provide a locale for the '
                                       'new catalog')
        try:
            self._locale = Locale.parse(self.locale)
        except UnknownLocaleError, e:
            raise DistutilsOptionError(e)
        if not self.output_file and not self.output_dir:
            raise DistutilsOptionError('you must specify the output directory')
        if not self.output_file:
            self.output_file = os.path.join(self.output_dir, self.locale,
                                            'LC_MESSAGES', self.domain + '.po')
        if not os.path.exists(os.path.dirname(self.output_file)):
            os.makedirs(os.path.dirname(self.output_file))
    def run(self):
        """Read the POT template and write a new locale-specific PO file."""
        log.info('creating catalog %r based on %r', self.output_file,
                 self.input_file)
        infile = open(self.input_file, 'r')
        try:
            # Although reading from the catalog template, read_po must be fed
            # the locale in order to correcly calculate plurals
            catalog = read_po(infile, locale=self.locale)
        finally:
            infile.close()
        catalog.locale = self._locale
        # A freshly initialized catalog is not fuzzy, unlike the template.
        catalog.fuzzy = False
        outfile = open(self.output_file, 'w')
        try:
            write_po(outfile, catalog)
        finally:
            outfile.close()
class update_catalog(Command):
    """Catalog merging command for use in ``setup.py`` scripts.
    If correctly installed, this command is available to Setuptools-using
    setup scripts automatically. For projects using plain old ``distutils``,
    the command needs to be registered explicitly in ``setup.py``::
        from babel.messages.frontend import update_catalog
        setup(
            ...
            cmdclass = {'update_catalog': update_catalog}
        )
    :since: version 0.9
    :see: `Integrating new distutils commands <http://docs.python.org/dist/node32.html>`_
    :see: `setuptools <http://peak.telecommunity.com/DevCenter/setuptools>`_
    """
    description = 'update message catalogs from a POT file'
    user_options = [
        ('domain=', 'D',
         "domain of PO file (default 'messages')"),
        ('input-file=', 'i',
         'name of the input file'),
        ('output-dir=', 'd',
         'path to base directory containing the catalogs'),
        ('output-file=', 'o',
         "name of the output file (default "
         "'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),
        ('locale=', 'l',
         'locale of the catalog to compile'),
        ('ignore-obsolete=', None,
         'whether to omit obsolete messages from the output'),
        ('no-fuzzy-matching', 'N',
         'do not use fuzzy matching'),
        ('previous', None,
         'keep previous msgids of translated messages')
    ]
    # NOTE(review): distutils boolean_options conventionally use the
    # hyphenated option names ('ignore-obsolete', ...) - verify these
    # underscore spellings are intentional.
    boolean_options = ['ignore_obsolete', 'no_fuzzy_matching', 'previous']
    def initialize_options(self):
        """Set the default (unset) state for every command option."""
        self.domain = 'messages'
        self.input_file = None
        self.output_dir = None
        self.output_file = None
        self.locale = None
        self.ignore_obsolete = False
        self.no_fuzzy_matching = False
        self.previous = False
    def finalize_options(self):
        """Validate option combinations after the command line is parsed."""
        if not self.input_file:
            raise DistutilsOptionError('you must specify the input file')
        if not self.output_file and not self.output_dir:
            raise DistutilsOptionError('you must specify the output file or '
                                       'directory')
        if self.output_file and not self.locale:
            raise DistutilsOptionError('you must specify the locale')
        if self.no_fuzzy_matching and self.previous:
            # --previous only makes sense together with fuzzy matching.
            self.previous = False
    def run(self):
        """Merge the POT template into each existing locale catalog."""
        po_files = []
        if not self.output_file:
            if self.locale:
                po_files.append((self.locale,
                                 os.path.join(self.output_dir, self.locale,
                                              'LC_MESSAGES',
                                              self.domain + '.po')))
            else:
                for locale in os.listdir(self.output_dir):
                    po_file = os.path.join(self.output_dir, locale,
                                           'LC_MESSAGES',
                                           self.domain + '.po')
                    if os.path.exists(po_file):
                        po_files.append((locale, po_file))
        else:
            po_files.append((self.locale, self.output_file))
        domain = self.domain
        if not domain:
            domain = os.path.splitext(os.path.basename(self.input_file))[0]
        infile = open(self.input_file, 'U')
        try:
            template = read_po(infile)
        finally:
            infile.close()
        if not po_files:
            raise DistutilsOptionError('no message catalogs found')
        for locale, filename in po_files:
            log.info('updating catalog %r based on %r', filename,
                     self.input_file)
            infile = open(filename, 'U')
            try:
                catalog = read_po(infile, locale=locale, domain=domain)
            finally:
                infile.close()
            catalog.update(template, self.no_fuzzy_matching)
            # Write to a temp file first so a failed write never clobbers
            # the existing catalog.
            tmpname = os.path.join(os.path.dirname(filename),
                                   tempfile.gettempprefix() +
                                   os.path.basename(filename))
            tmpfile = open(tmpname, 'w')
            try:
                try:
                    write_po(tmpfile, catalog,
                             ignore_obsolete=self.ignore_obsolete,
                             include_previous=self.previous)
                finally:
                    tmpfile.close()
            except:
                os.remove(tmpname)
                raise
            try:
                os.rename(tmpname, filename)
            except OSError:
                # We're probably on Windows, which doesn't support atomic
                # renames, at least not through Python
                # If the error is in fact due to a permissions problem, that
                # same error is going to be raised from one of the following
                # operations
                os.remove(filename)
                shutil.copy(tmpname, filename)
                os.remove(tmpname)
class CommandLineInterface(object):
    """Command-line interface.
    This class provides a simple command-line interface to the message
    extraction and PO file generation functionality.
    """
    # ``usage`` is filled in twice: once with the sub-command name and its
    # argument spec, the outer %%prog is expanded by optparse itself.
    usage = '%%prog %s [options] %s'
    version = '%%prog %s' % VERSION
    # Maps each sub-command name to its help text; each key must match a
    # method name on this class (see ``run``).
    commands = {
        'compile': 'compile message catalogs to MO files',
        'extract': 'extract messages from source files and generate a POT file',
        'init': 'create new message catalogs from a POT file',
        'update': 'update existing message catalogs from a POT file'
    }
    def run(self, argv=sys.argv):
        """Main entry point of the command-line interface.
        :param argv: list of arguments passed on the command-line
        """
        self.parser = OptionParser(usage=self.usage % ('command', '[args]'),
                                   version=self.version)
        # Stop parsing at the first positional so sub-command options are
        # left for the sub-command's own parser.
        self.parser.disable_interspersed_args()
        self.parser.print_help = self._help
        self.parser.add_option('--list-locales', dest='list_locales',
                               action='store_true',
                               help="print all known locales and exit")
        self.parser.add_option('-v', '--verbose', action='store_const',
                               dest='loglevel', const=logging.DEBUG,
                               help='print as much as possible')
        self.parser.add_option('-q', '--quiet', action='store_const',
                               dest='loglevel', const=logging.ERROR,
                               help='print as little as possible')
        self.parser.set_defaults(list_locales=False, loglevel=logging.INFO)
        options, args = self.parser.parse_args(argv[1:])
        # Configure logging
        self.log = logging.getLogger('babel')
        self.log.setLevel(options.loglevel)
        handler = logging.StreamHandler()
        handler.setLevel(options.loglevel)
        formatter = logging.Formatter('%(message)s')
        handler.setFormatter(formatter)
        self.log.addHandler(handler)
        if options.list_locales:
            identifiers = localedata.list()
            # Pad the identifier column to the longest identifier.
            longest = max([len(identifier) for identifier in identifiers])
            identifiers.sort()
            format = u'%%-%ds %%s' % (longest + 1)
            for identifier in identifiers:
                locale = Locale.parse(identifier)
                output = format % (identifier, locale.english_name)
                print output.encode(sys.stdout.encoding or
                                    getpreferredencoding() or
                                    'ascii', 'replace')
            return 0
        if not args:
            self.parser.error('no valid command or option passed. '
                              'Try the -h/--help option for more information.')
        cmdname = args[0]
        if cmdname not in self.commands:
            self.parser.error('unknown command "%s"' % cmdname)
        # Dispatch to the method named after the sub-command.
        return getattr(self, cmdname)(args[1:])
    def _help(self):
        # Custom help output: standard optparse help plus the command list.
        print self.parser.format_help()
        print "commands:"
        longest = max([len(command) for command in self.commands])
        format = "  %%-%ds %%s" % max(8, longest + 1)
        commands = self.commands.items()
        commands.sort()
        for name, description in commands:
            print format % (name, description)
    def compile(self, argv):
        """Subcommand for compiling a message catalog to a MO file.
        :param argv: the command arguments
        :since: version 0.9
        """
        parser = OptionParser(usage=self.usage % ('compile', ''),
                              description=self.commands['compile'])
        parser.add_option('--domain', '-D', dest='domain',
                          help="domain of MO and PO files (default '%default')")
        parser.add_option('--directory', '-d', dest='directory',
                          metavar='DIR', help='base directory of catalog files')
        parser.add_option('--locale', '-l', dest='locale', metavar='LOCALE',
                          help='locale of the catalog')
        parser.add_option('--input-file', '-i', dest='input_file',
                          metavar='FILE', help='name of the input file')
        parser.add_option('--output-file', '-o', dest='output_file',
                          metavar='FILE',
                          help="name of the output file (default "
                               "'<output_dir>/<locale>/LC_MESSAGES/"
                               "<domain>.mo')")
        parser.add_option('--use-fuzzy', '-f', dest='use_fuzzy',
                          action='store_true',
                          help='also include fuzzy translations (default '
                               '%default)')
        parser.add_option('--statistics', dest='statistics',
                          action='store_true',
                          help='print statistics about translations')
        parser.set_defaults(domain='messages', use_fuzzy=False,
                            compile_all=False, statistics=False)
        options, args = parser.parse_args(argv)
        # po_files and mo_files are built in lockstep: entry i of one
        # corresponds to entry i of the other.
        po_files = []
        mo_files = []
        if not options.input_file:
            if not options.directory:
                parser.error('you must specify either the input file or the '
                             'base directory')
            if options.locale:
                po_files.append((options.locale,
                                 os.path.join(options.directory,
                                              options.locale, 'LC_MESSAGES',
                                              options.domain + '.po')))
                mo_files.append(os.path.join(options.directory, options.locale,
                                             'LC_MESSAGES',
                                             options.domain + '.mo'))
            else:
                # No locale: compile every catalog found under the directory.
                for locale in os.listdir(options.directory):
                    po_file = os.path.join(options.directory, locale,
                                           'LC_MESSAGES', options.domain + '.po')
                    if os.path.exists(po_file):
                        po_files.append((locale, po_file))
                        mo_files.append(os.path.join(options.directory, locale,
                                                     'LC_MESSAGES',
                                                     options.domain + '.mo'))
        else:
            po_files.append((options.locale, options.input_file))
            if options.output_file:
                mo_files.append(options.output_file)
            else:
                if not options.directory:
                    parser.error('you must specify either the input file or '
                                 'the base directory')
                mo_files.append(os.path.join(options.directory, options.locale,
                                             'LC_MESSAGES',
                                             options.domain + '.mo'))
        if not po_files:
            parser.error('no message catalogs found')
        for idx, (locale, po_file) in enumerate(po_files):
            mo_file = mo_files[idx]
            infile = open(po_file, 'r')
            try:
                catalog = read_po(infile, locale)
            finally:
                infile.close()
            if options.statistics:
                translated = 0
                # Skip the header entry (index 0) when counting messages.
                for message in list(catalog)[1:]:
                    if message.string:
                        translated +=1
                percentage = 0
                if len(catalog):
                    percentage = translated * 100 // len(catalog)
                self.log.info("%d of %d messages (%d%%) translated in %r",
                              translated, len(catalog), percentage, po_file)
            if catalog.fuzzy and not options.use_fuzzy:
                self.log.warn('catalog %r is marked as fuzzy, skipping',
                              po_file)
                continue
            # Report catalog problems but still compile the catalog.
            for message, errors in catalog.check():
                for error in errors:
                    self.log.error('error: %s:%d: %s', po_file, message.lineno,
                                   error)
            self.log.info('compiling catalog %r to %r', po_file, mo_file)
            outfile = open(mo_file, 'wb')
            try:
                write_mo(outfile, catalog, use_fuzzy=options.use_fuzzy)
            finally:
                outfile.close()
    def extract(self, argv):
        """Subcommand for extracting messages from source files and generating
        a POT file.
        :param argv: the command arguments
        """
        parser = OptionParser(usage=self.usage % ('extract', 'dir1 <dir2> ...'),
                              description=self.commands['extract'])
        parser.add_option('--charset', dest='charset',
                          help='charset to use in the output (default '
                               '"%default")')
        parser.add_option('-k', '--keyword', dest='keywords', action='append',
                          help='keywords to look for in addition to the '
                               'defaults. You can specify multiple -k flags on '
                               'the command line.')
        parser.add_option('--no-default-keywords', dest='no_default_keywords',
                          action='store_true',
                          help="do not include the default keywords")
        parser.add_option('--mapping', '-F', dest='mapping_file',
                          help='path to the extraction mapping file')
        parser.add_option('--no-location', dest='no_location',
                          action='store_true',
                          help='do not include location comments with filename '
                               'and line number')
        parser.add_option('--omit-header', dest='omit_header',
                          action='store_true',
                          help='do not include msgid "" entry in header')
        parser.add_option('-o', '--output', dest='output',
                          help='path to the output POT file')
        parser.add_option('-w', '--width', dest='width', type='int',
                          help="set output line width (default 76)")
        parser.add_option('--no-wrap', dest='no_wrap', action = 'store_true',
                          help='do not break long message lines, longer than '
                               'the output line width, into several lines')
        parser.add_option('--sort-output', dest='sort_output',
                          action='store_true',
                          help='generate sorted output (default False)')
        parser.add_option('--sort-by-file', dest='sort_by_file',
                          action='store_true',
                          help='sort output by file location (default False)')
        parser.add_option('--msgid-bugs-address', dest='msgid_bugs_address',
                          metavar='EMAIL@ADDRESS',
                          help='set report address for msgid')
        parser.add_option('--copyright-holder', dest='copyright_holder',
                          help='set copyright holder in output')
        parser.add_option('--project', dest='project',
                          help='set project name in output')
        parser.add_option('--version', dest='version',
                          help='set project version in output')
        parser.add_option('--add-comments', '-c', dest='comment_tags',
                          metavar='TAG', action='append',
                          help='place comment block with TAG (or those '
                               'preceding keyword lines) in output file. One '
                               'TAG per argument call')
        parser.add_option('--strip-comment-tags', '-s',
                          dest='strip_comment_tags', action='store_true',
                          help='Strip the comment tags from the comments.')
        parser.set_defaults(charset='utf-8', keywords=[],
                            no_default_keywords=False, no_location=False,
                            omit_header = False, width=None, no_wrap=False,
                            sort_output=False, sort_by_file=False,
                            comment_tags=[], strip_comment_tags=False)
        options, args = parser.parse_args(argv)
        if not args:
            parser.error('incorrect number of arguments')
        # '-' (or no --output) means write the POT to stdout.
        if options.output not in (None, '-'):
            outfile = open(options.output, 'w')
        else:
            outfile = sys.stdout
        keywords = DEFAULT_KEYWORDS.copy()
        if options.no_default_keywords:
            if not options.keywords:
                parser.error('you must specify new keywords if you disable the '
                             'default ones')
            keywords = {}
        if options.keywords:
            keywords.update(parse_keywords(options.keywords))
        if options.mapping_file:
            fileobj = open(options.mapping_file, 'U')
            try:
                method_map, options_map = parse_mapping(fileobj)
            finally:
                fileobj.close()
        else:
            method_map = DEFAULT_MAPPING
            options_map = {}
        if options.width and options.no_wrap:
            parser.error("'--no-wrap' and '--width' are mutually exclusive.")
        elif not options.width and not options.no_wrap:
            options.width = 76
        if options.sort_output and options.sort_by_file:
            parser.error("'--sort-output' and '--sort-by-file' are mutually "
                         "exclusive")
        try:
            catalog = Catalog(project=options.project,
                              version=options.version,
                              msgid_bugs_address=options.msgid_bugs_address,
                              copyright_holder=options.copyright_holder,
                              charset=options.charset)
            for dirname in args:
                if not os.path.isdir(dirname):
                    parser.error('%r is not a directory' % dirname)
                # Per-file progress callback used by extract_from_dir.
                def callback(filename, method, options):
                    if method == 'ignore':
                        return
                    filepath = os.path.normpath(os.path.join(dirname, filename))
                    optstr = ''
                    if options:
                        optstr = ' (%s)' % ', '.join(['%s="%s"' % (k, v) for
                                                      k, v in options.items()])
                    self.log.info('extracting messages from %s%s', filepath,
                                  optstr)
                extracted = extract_from_dir(dirname, method_map, options_map,
                                             keywords, options.comment_tags,
                                             callback=callback,
                                             strip_comment_tags=
                                                 options.strip_comment_tags)
                for filename, lineno, message, comments in extracted:
                    filepath = os.path.normpath(os.path.join(dirname, filename))
                    catalog.add(message, None, [(filepath, lineno)],
                                auto_comments=comments)
            if options.output not in (None, '-'):
                self.log.info('writing PO template file to %s' % options.output)
            write_po(outfile, catalog, width=options.width,
                     no_location=options.no_location,
                     omit_header=options.omit_header,
                     sort_output=options.sort_output,
                     sort_by_file=options.sort_by_file)
        finally:
            # NOTE(review): with ``--output -`` this closes sys.stdout, since
            # options.output is '-' (truthy) but outfile is stdout — confirm
            # whether the guard should be ``options.output not in (None, '-')``.
            if options.output:
                outfile.close()
    def init(self, argv):
        """Subcommand for creating new message catalogs from a template.
        :param argv: the command arguments
        """
        parser = OptionParser(usage=self.usage % ('init', ''),
                              description=self.commands['init'])
        parser.add_option('--domain', '-D', dest='domain',
                          help="domain of PO file (default '%default')")
        parser.add_option('--input-file', '-i', dest='input_file',
                          metavar='FILE', help='name of the input file')
        parser.add_option('--output-dir', '-d', dest='output_dir',
                          metavar='DIR', help='path to output directory')
        parser.add_option('--output-file', '-o', dest='output_file',
                          metavar='FILE',
                          help="name of the output file (default "
                               "'<output_dir>/<locale>/LC_MESSAGES/"
                               "<domain>.po')")
        parser.add_option('--locale', '-l', dest='locale', metavar='LOCALE',
                          help='locale for the new localized catalog')
        parser.set_defaults(domain='messages')
        options, args = parser.parse_args(argv)
        if not options.locale:
            parser.error('you must provide a locale for the new catalog')
        try:
            locale = Locale.parse(options.locale)
        except UnknownLocaleError, e:
            parser.error(e)
        if not options.input_file:
            parser.error('you must specify the input file')
        if not options.output_file and not options.output_dir:
            parser.error('you must specify the output file or directory')
        if not options.output_file:
            # Derive the conventional per-locale catalog path.
            options.output_file = os.path.join(options.output_dir,
                                               options.locale, 'LC_MESSAGES',
                                               options.domain + '.po')
        if not os.path.exists(os.path.dirname(options.output_file)):
            os.makedirs(os.path.dirname(options.output_file))
        infile = open(options.input_file, 'r')
        try:
            # Although reading from the catalog template, read_po must be fed
            # the locale in order to correcly calculate plurals
            catalog = read_po(infile, locale=options.locale)
        finally:
            infile.close()
        catalog.locale = locale
        catalog.revision_date = datetime.now(LOCALTZ)
        self.log.info('creating catalog %r based on %r', options.output_file,
                      options.input_file)
        outfile = open(options.output_file, 'w')
        try:
            write_po(outfile, catalog)
        finally:
            outfile.close()
    def update(self, argv):
        """Subcommand for updating existing message catalogs from a template.
        :param argv: the command arguments
        :since: version 0.9
        """
        parser = OptionParser(usage=self.usage % ('update', ''),
                              description=self.commands['update'])
        parser.add_option('--domain', '-D', dest='domain',
                          help="domain of PO file (default '%default')")
        parser.add_option('--input-file', '-i', dest='input_file',
                          metavar='FILE', help='name of the input file')
        parser.add_option('--output-dir', '-d', dest='output_dir',
                          metavar='DIR', help='path to output directory')
        parser.add_option('--output-file', '-o', dest='output_file',
                          metavar='FILE',
                          help="name of the output file (default "
                               "'<output_dir>/<locale>/LC_MESSAGES/"
                               "<domain>.po')")
        parser.add_option('--locale', '-l', dest='locale', metavar='LOCALE',
                          help='locale of the translations catalog')
        parser.add_option('--ignore-obsolete', dest='ignore_obsolete',
                          action='store_true',
                          help='do not include obsolete messages in the output '
                               '(default %default)')
        parser.add_option('--no-fuzzy-matching', '-N', dest='no_fuzzy_matching',
                          action='store_true',
                          help='do not use fuzzy matching (default %default)')
        parser.add_option('--previous', dest='previous', action='store_true',
                          help='keep previous msgids of translated messages '
                               '(default %default)')
        parser.set_defaults(domain='messages', ignore_obsolete=False,
                            no_fuzzy_matching=False, previous=False)
        options, args = parser.parse_args(argv)
        if not options.input_file:
            parser.error('you must specify the input file')
        if not options.output_file and not options.output_dir:
            parser.error('you must specify the output file or directory')
        if options.output_file and not options.locale:
            parser.error('you must specify the locale')
        if options.no_fuzzy_matching and options.previous:
            # --previous only works together with fuzzy matching.
            options.previous = False
        po_files = []
        if not options.output_file:
            if options.locale:
                po_files.append((options.locale,
                                 os.path.join(options.output_dir,
                                              options.locale, 'LC_MESSAGES',
                                              options.domain + '.po')))
            else:
                # No locale: update every catalog under the output directory.
                for locale in os.listdir(options.output_dir):
                    po_file = os.path.join(options.output_dir, locale,
                                           'LC_MESSAGES',
                                           options.domain + '.po')
                    if os.path.exists(po_file):
                        po_files.append((locale, po_file))
        else:
            po_files.append((options.locale, options.output_file))
        domain = options.domain
        if not domain:
            domain = os.path.splitext(os.path.basename(options.input_file))[0]
        infile = open(options.input_file, 'U')
        try:
            template = read_po(infile)
        finally:
            infile.close()
        if not po_files:
            parser.error('no message catalogs found')
        for locale, filename in po_files:
            self.log.info('updating catalog %r based on %r', filename,
                          options.input_file)
            infile = open(filename, 'U')
            try:
                catalog = read_po(infile, locale=locale, domain=domain)
            finally:
                infile.close()
            catalog.update(template, options.no_fuzzy_matching)
            # Write to a sibling temp file so the final rename/copy stays
            # on the same filesystem.
            tmpname = os.path.join(os.path.dirname(filename),
                                   tempfile.gettempprefix() +
                                   os.path.basename(filename))
            tmpfile = open(tmpname, 'w')
            try:
                try:
                    write_po(tmpfile, catalog,
                             ignore_obsolete=options.ignore_obsolete,
                             include_previous=options.previous)
                finally:
                    tmpfile.close()
            except:
                # Don't leave a partial temp file behind on failure.
                os.remove(tmpname)
                raise
            try:
                os.rename(tmpname, filename)
            except OSError:
                # We're probably on Windows, which doesn't support atomic
                # renames, at least not through Python
                # If the error is in fact due to a permissions problem, that
                # same error is going to be raised from one of the following
                # operations
                os.remove(filename)
                shutil.copy(tmpname, filename)
                os.remove(tmpname)
def main():
    """Console-script entry point: run the pybabel command-line interface."""
    cli = CommandLineInterface()
    return cli.run(sys.argv)
def parse_mapping(fileobj, filename=None):
    """Parse an extraction method mapping from a file-like object.

    The mapping uses ConfigParser syntax.  An optional ``[extractors]``
    section maps short extractor aliases to full
    ``package.module:funcname`` specifications; every other section
    header has the form ``[method: pattern]``, and the section's options
    apply to files matching that pattern.  Patterns are returned in the
    order they appear in the file.

    :param fileobj: a readable file-like object containing the configuration
                    text to parse
    :param filename: the name of the file being parsed, for error messages
    :return: a `(method_map, options_map)` tuple
    :rtype: `tuple`
    :see: `extract_from_directory`
    """
    aliases = {}
    method_map = []
    options_map = {}
    parser = RawConfigParser()
    # Swap in an ordered dict so sections keep their file order.
    parser._sections = odict(parser._sections)
    parser.readfp(fileobj, filename)
    for section in parser.sections():
        if section == 'extractors':
            aliases = dict(parser.items(section))
            continue
        method, pattern = [part.strip() for part in section.split(':', 1)]
        method_map.append((pattern, method))
        options_map[pattern] = dict(parser.items(section))
    if aliases:
        # Resolve extractor aliases to their full specifications; methods
        # without an alias entry are kept unchanged.
        method_map = [(pattern, aliases.get(method, method))
                      for pattern, method in method_map]
    return (method_map, options_map)
def parse_keywords(strings=()):
    """Parse keywords specifications from the given list of strings.

    Each string is either a bare function name (all arguments are
    message ids) or ``name:i,j`` naming the 1-based argument positions
    that carry message ids.  The first specification for a name wins.

    >>> kw = parse_keywords(['_', 'dgettext:2', 'dngettext:2,3']).items()
    >>> kw.sort()
    >>> for keyword, indices in kw:
    ...     print (keyword, indices)
    ('_', None)
    ('dgettext', (2,))
    ('dngettext', (2, 3))

    :param strings: the keyword specifications to parse
    :return: mapping of function name to a tuple of argument indices,
             or ``None`` when every argument is a message id
    :rtype: `dict`
    """
    keywords = {}
    for string in strings:
        if ':' in string:
            funcname, indices = string.split(':')
        else:
            funcname, indices = string, None
        # Keep the first specification seen for each function name.
        if funcname not in keywords:
            if indices:
                indices = tuple([(int(x)) for x in indices.split(',')])
            keywords[funcname] = indices
    return keywords
# Allow running this module directly as the command-line tool.
if __name__ == '__main__':
    main()
| bsd-3-clause |
pztrick/django-allauth | allauth/socialaccount/providers/vk/provider.py | 1 | 1536 | from allauth.socialaccount import app_settings
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class VKAccount(ProviderAccount):
    """Wraps the extra profile data returned by the VK API for an account."""

    def get_profile_url(self):
        """Return the public vk.com profile URL for this account."""
        return 'https://vk.com/id%s' % self.account.extra_data.get('uid')

    def get_avatar_url(self):
        """Return the largest available avatar URL, or ``None``."""
        extra = self.account.extra_data
        # Prefer the big photo and fall back to the medium one.
        for key in ('photo_big', 'photo_medium'):
            url = extra.get(key)
            if url:
                return url
        return None

    def to_str(self):
        """Return the user's full name, or the default representation."""
        extra = self.account.extra_data
        full_name = ' '.join([extra.get('first_name', ''),
                              extra.get('last_name', '')]).strip()
        return full_name or super(VKAccount, self).to_str()
class VKProvider(OAuth2Provider):
    """OAuth2 provider implementation for vk.com."""

    id = 'vk'
    name = 'VK'
    account_class = VKAccount

    def get_default_scope(self):
        """Request the 'email' scope only when email querying is enabled."""
        if app_settings.QUERY_EMAIL:
            return ['email']
        return []

    def extract_uid(self, data):
        """Return the provider-assigned user id as a string."""
        return str(data['uid'])

    def extract_common_fields(self, data):
        """Map VK profile fields onto allauth's common user fields."""
        return {
            'email': data.get('email'),
            'last_name': data.get('last_name'),
            'username': data.get('screen_name'),
            'first_name': data.get('first_name'),
        }
provider_classes = [VKProvider]
| mit |
savi-dev/horizon | horizon/tabs/base.py | 8 | 16154 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from django.template import TemplateSyntaxError
from django.template.loader import render_to_string
from django.utils.datastructures import SortedDict
from horizon import exceptions
from horizon.utils import html
# Separator used to compose "<group_slug>__<tab_slug>" ids and the value of
# the tab-selection GET parameter (see TabGroup.get_selected_tab).
SEPARATOR = "__"
# Default CSS classes for tab groups, and for tabs in each state.
CSS_TAB_GROUP_CLASSES = ["nav", "nav-tabs", "ajax-tabs"]
CSS_ACTIVE_TAB_CLASSES = ["active"]
CSS_DISABLED_TAB_CLASSES = ["disabled"]
class TabGroup(html.HTMLElement):
    """
    A container class which knows how to manage and render
    :class:`~horizon.tabs.Tab` objects.
    .. attribute:: slug
    The URL slug and pseudo-unique identifier for this tab group.
    .. attribute:: template_name
    The name of the template which will be used to render this tab group.
    Default: ``"horizon/common/_tab_group.html"``
    .. attribute:: sticky
    Boolean to control whether the active tab state should be stored
    across requests for a given user. (State storage is all done
    client-side.)
    .. attribute:: param_name
    The name of the GET request parameter which will be used when
    requesting specific tab data. Default: ``tab``.
    .. attribute:: classes
    A list of CSS classes which should be displayed on this tab group.
    .. attribute:: attrs
    A dictionary of HTML attributes which should be rendered into the
    markup for this tab group.
    .. attribute:: selected
    Read-only property which is set to the instance of the
    currently-selected tab if there is one, otherwise ``None``.
    .. attribute:: active
    Read-only property which is set to the value of the current active tab.
    This may not be the same as the value of ``selected`` if no
    specific tab was requested via the ``GET`` parameter.
    """
    slug = None
    template_name = "horizon/common/_tab_group.html"
    param_name = 'tab'
    sticky = False
    # Backing attributes for the ``selected``/``active`` properties below.
    _selected = None
    _active = None
    @property
    def selected(self):
        return self._selected
    @property
    def active(self):
        return self._active
    def __init__(self, request, **kwargs):
        super(TabGroup, self).__init__()
        # Subclasses must define a ``tabs`` iterable of Tab classes.
        if not hasattr(self, "tabs"):
            raise NotImplementedError('%s must declare a "tabs" attribute.'
                                      % self.__class__)
        self.request = request
        self.kwargs = kwargs
        self._data = None
        # Instantiate each Tab class, keyed by its slug, preserving order.
        tab_instances = []
        for tab in self.tabs:
            tab_instances.append((tab.slug, tab(self, request)))
        self._tabs = SortedDict(tab_instances)
        if self.sticky:
            # Client-side JS uses this attribute to persist the active tab.
            self.attrs['data-sticky-tabs'] = 'sticky'
        if not self._set_active_tab():
            self.tabs_not_available()
    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.slug)
    def load_tab_data(self):
        """
        Preload all data that for the tabs that will be displayed.
        """
        for tab in self._tabs.values():
            if tab.load and not tab.data_loaded:
                try:
                    tab._data = tab.get_context_data(self.request)
                except:
                    # A failed tab is marked with False (not None) so it is
                    # considered "loaded" and not retried.
                    tab._data = False
                    exceptions.handle(self.request)
    def get_id(self):
        """
        Returns the id for this tab group. Defaults to the value of the tab
        group's :attr:`horizon.tabs.Tab.slug`.
        """
        return self.slug
    def get_default_classes(self):
        """
        Returns a list of the default classes for the tab group. Defaults to
        ``["nav", "nav-tabs", "ajax-tabs"]``.
        """
        default_classes = super(TabGroup, self).get_default_classes()
        default_classes.extend(CSS_TAB_GROUP_CLASSES)
        return default_classes
    def tabs_not_available(self):
        """
        In the event that no tabs are either allowed or enabled, this method
        is the fallback handler. By default it's a no-op, but it exists
        to make redirecting or raising exceptions possible for subclasses.
        """
        pass
    def _set_active_tab(self):
        marked_active = None
        # See if we have a selected tab via the GET parameter.
        tab = self.get_selected_tab()
        if tab:
            tab._active = True
            self._active = tab
            marked_active = tab
        # Iterate through to mark them all accordingly.
        # NOTE(review): marked_active holds a Tab instance when set from the
        # GET parameter but True when set by the fallback below, so the
        # ``tab == marked_active`` comparison only matches the GET-selected
        # tab — confirm this mixed typing is intentional.
        for tab in self._tabs.values():
            if tab._allowed and tab._enabled and not marked_active:
                tab._active = True
                self._active = tab
                marked_active = True
            elif tab == marked_active:
                continue
            else:
                tab._active = False
        return marked_active
    def render(self):
        """ Renders the HTML output for this tab group. """
        return render_to_string(self.template_name, {"tab_group": self})
    def get_tabs(self):
        """ Returns a list of the allowed tabs for this tab group. """
        return filter(lambda tab: tab._allowed, self._tabs.values())
    def get_tab(self, tab_name, allow_disabled=False):
        """ Returns a specific tab from this tab group.
        If the tab is not allowed or not enabled this method returns ``None``.
        If the tab is disabled but you wish to return it anyway, you can pass
        ``True`` to the allow_disabled argument.
        """
        tab = self._tabs.get(tab_name, None)
        if tab and tab._allowed and (tab._enabled or allow_disabled):
            return tab
        return None
    def get_loaded_tabs(self):
        # Tabs which pass the allowed/enabled filtering of get_tab().
        return filter(lambda t: self.get_tab(t.slug), self._tabs.values())
    def get_selected_tab(self):
        """ Returns the tab specific by the GET request parameter.
        In the event that there is no GET request parameter, the value
        of the query parameter is invalid, or the tab is not allowed/enabled,
        the return value of this function is None.
        """
        selected = self.request.GET.get(self.param_name, None)
        if selected:
            # The parameter is "<group_slug>__<tab_slug>"; only honor it if
            # it targets this tab group.
            tab_group, tab_name = selected.split(SEPARATOR)
            if tab_group == self.get_id():
                self._selected = self.get_tab(tab_name)
        return self._selected
class Tab(html.HTMLElement):
    """
    A reusable interface for constructing a tab within a
    :class:`~horizon.tabs.TabGroup`.
    .. attribute:: name
    The display name for the tab which will be rendered as the text for
    the tab element in the HTML. Required.
    .. attribute:: slug
    The URL slug and id attribute for the tab. This should be unique for
    a given tab group. Required.
    .. attribute:: preload
    Determines whether the contents of the tab should be rendered into
    the page's HTML when the tab group is rendered, or whether it should
    be loaded dynamically when the tab is selected. Default: ``True``.
    .. attribute:: classes
    A list of CSS classes which should be displayed on this tab.
    .. attribute:: attrs
    A dictionary of HTML attributes which should be rendered into the
    markup for this tab.
    .. attribute:: load
    Read-only access to determine whether or not this tab's data should
    be loaded immediately.
    """
    name = None
    slug = None
    preload = True
    # None means "not yet determined"; resolved lazily by is_active().
    _active = None
    def __init__(self, tab_group, request=None):
        super(Tab, self).__init__()
        # Priority: constructor, class-defined, fallback
        if not self.name:
            raise ValueError("%s must have a name." % self.__class__.__name__)
        self.name = unicode(self.name)  # Force unicode.
        if not self.slug:
            raise ValueError("%s must have a slug." % self.__class__.__name__)
        self.tab_group = tab_group
        self.request = request
        if request:
            self._allowed = self.allowed(request)
            self._enabled = self.enabled(request)
    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.slug)
    def is_active(self):
        """ Method to access whether or not this tab is the active tab. """
        if self._active is None:
            # Let the group resolve which tab is active on first access.
            self.tab_group._set_active_tab()
        return self._active
    @property
    def load(self):
        load_preloaded = self.preload or self.is_active()
        return load_preloaded and self._allowed and self._enabled
    @property
    def data(self):
        # Lazily computed and cached; a failed load is stored as False by
        # TabGroup.load_tab_data, which also prevents recomputation here.
        if getattr(self, "_data", None) is None:
            self._data = self.get_context_data(self.request)
        return self._data
    @property
    def data_loaded(self):
        return getattr(self, "_data", None) is not None
    def render(self):
        """
        Renders the tab to HTML using the
        :meth:`~horizon.tabs.Tab.get_context_data` method and
        the :meth:`~horizon.tabs.Tab.get_template_name` method.
        If :attr:`~horizon.tabs.Tab.preload` is ``False`` and ``force_load``
        is not ``True``, or
        either :meth:`~horizon.tabs.Tab.allowed` or
        :meth:`~horizon.tabs.Tab.enabled` returns ``False`` this method will
        return an empty string.
        """
        if not self.load:
            return ''
        try:
            context = self.data
        except exceptions.Http302:
            # Redirects must propagate untouched.
            raise
        except:
            # Re-raise anything else as a template error, preserving the
            # original traceback (Python 2 three-argument raise).
            exc_type, exc_value, exc_traceback = sys.exc_info()
            raise TemplateSyntaxError, exc_value, exc_traceback
        return render_to_string(self.get_template_name(self.request), context)
    def get_id(self):
        """
        Returns the id for this tab. Defaults to
        ``"{{ tab_group.slug }}__{{ tab.slug }}"``.
        """
        return SEPARATOR.join([self.tab_group.slug, self.slug])
    def get_query_string(self):
        # e.g. "tab=<group_slug>__<tab_slug>" for use in links.
        return "=".join((self.tab_group.param_name, self.get_id()))
    def get_default_classes(self):
        """
        Returns a list of the default classes for the tab. Defaults to
        and empty list (``[]``), however additional classes may be added
        depending on the state of the tab as follows:
        If the tab is the active tab for the tab group, in which
        the class ``"active"`` will be added.
        If the tab is not enabled, the classes the class ``"disabled"``
        will be added.
        """
        default_classes = super(Tab, self).get_default_classes()
        if self.is_active():
            default_classes.extend(CSS_ACTIVE_TAB_CLASSES)
        if not self._enabled:
            default_classes.extend(CSS_DISABLED_TAB_CLASSES)
        return default_classes
    def get_template_name(self, request):
        """
        Returns the name of the template to be used for rendering this tab.
        By default it returns the value of the ``template_name`` attribute
        on the ``Tab`` class.
        """
        if not hasattr(self, "template_name"):
            raise AttributeError("%s must have a template_name attribute or "
                                 "override the get_template_name method."
                                 % self.__class__.__name__)
        return self.template_name
    def get_context_data(self, request):
        """
        This method should return a dictionary of context data used to render
        the tab. Required.
        """
        raise NotImplementedError("%s needs to define a get_context_data "
                                  "method." % self.__class__.__name__)
    def enabled(self, request):
        """
        Determines whether or not the tab should be accessible
        (e.g. be rendered into the HTML on load and respond to a click event).
        If a tab returns ``False`` from ``enabled`` it will ignore the value
        of ``preload`` and only render the HTML of the tab after being clicked.
        The default behavior is to return ``True`` for all cases.
        """
        return True
    def allowed(self, request):
        """
        Determines whether or not the tab is displayed.
        Tab instances can override this method to specify conditions under
        which this tab should not be shown at all by returning ``False``.
        The default behavior is to return ``True`` for all cases.
        """
        return True
class TableTab(Tab):
    """
    A :class:`~horizon.tabs.Tab` class which knows how to deal with
    :class:`~horizon.tables.DataTable` classes rendered inside of it.
    This distinct class is required due to the complexity involved in handling
    both dynamic tab loading, dynamic table updating and table actions all
    within one view.
    .. attribute:: table_classes
    An iterable containing the :class:`~horizon.tables.DataTable` classes
    which this tab will contain. Equivalent to the
    :attr:`~horizon.tables.MultiTableView.table_classes` attribute on
    :class:`~horizon.tables.MultiTableView`. For each table class you
    need to define a corresponding ``get_{{ table_name }}_data`` method
    as with :class:`~horizon.tables.MultiTableView`.
    """
    table_classes = None
    def __init__(self, tab_group, request):
        super(TableTab, self).__init__(tab_group, request)
        if not self.table_classes:
            class_name = self.__class__.__name__
            raise NotImplementedError("You must define a table_class "
                                      "attribute on %s" % class_name)
        # Instantiate our table classes but don't assign data yet
        table_instances = [(table._meta.name,
                            table(request, **tab_group.kwargs))
                           for table in self.table_classes]
        self._tables = SortedDict(table_instances)
        self._table_data_loaded = False
    def load_table_data(self):
        """
        Calls the ``get_{{ table_name }}_data`` methods for each table class
        and sets the data on the tables.
        """
        # We only want the data to be loaded once, so we track if we have...
        if not self._table_data_loaded:
            for table_name, table in self._tables.items():
                # Fetch the data function.
                func_name = "get_%s_data" % table_name
                data_func = getattr(self, func_name, None)
                if data_func is None:
                    cls_name = self.__class__.__name__
                    raise NotImplementedError("You must define a %s method "
                                              "on %s." % (func_name, cls_name))
                # Load the data.
                table.data = data_func()
            # Mark our data as loaded so we don't run the loaders again.
            self._table_data_loaded = True
    def get_context_data(self, request):
        """
        Adds a ``{{ table_name }}_table`` item to the context for each table
        in the :attr:`~horizon.tabs.TableTab.table_classes` attribute.
        If only one table class is provided, a shortcut ``table`` context
        variable is also added containing the single table.
        """
        context = {}
        # If the data hasn't been manually loaded before now,
        # make certain it's loaded before setting the context.
        self.load_table_data()
        for table_name, table in self._tables.items():
            # If there's only one table class, add a shortcut name as well.
            if len(self.table_classes) == 1:
                context["table"] = table
            context["%s_table" % table_name] = table
        return context
| apache-2.0 |
heijingjie/fs_linux_3.10.58 | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
calls. Only the functions' names and the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too long; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
    """ This class provides a tree representation of the functions
    call stack. If a function has no parent in the kernel (interrupt,
    syscall, kernel thread...) then it is attached to a virtual parent
    called ROOT.
    """
    # Sentinel root node; assigned by main() before parsing begins.
    ROOT = None

    def __init__(self, func, time = None, parent = None):
        # func:   function name
        # time:   call timestamp string, or None (e.g. for ROOT children)
        # parent: caller node; defaults to the virtual ROOT
        self._func = func
        self._time = time
        if parent is None:
            self._parent = CallTree.ROOT
        else:
            self._parent = parent
        self._children = []

    def calls(self, func, calltime):
        """ If a function calls another one, call this method to insert it
        into the tree at the appropriate place.
        @return: A reference to the newly created child node.
        """
        child = CallTree(func, calltime, self)
        self._children.append(child)
        return child

    def getParent(self, func):
        """ Retrieve the last parent of the current node that
        has the name given by func. If this function is not
        on a parent, then create it as new child of root
        @return: A reference to the parent.
        """
        tree = self
        # Walk up the ancestor chain looking for a frame named func.
        while tree != CallTree.ROOT and tree._func != func:
            tree = tree._parent
        if tree == CallTree.ROOT:
            # No ancestor matched: attach func directly beneath ROOT.
            child = CallTree.ROOT.calls(func, None)
            return child
        return tree

    def __repr__(self):
        return self.__toString("", True)

    def __toString(self, branch, lastChild):
        # Recursively render this node and its children as an ASCII tree.
        # branch accumulates the prefix of " |" guide characters.
        if self._time is not None:
            s = "%s----%s (%s)\n" % (branch, self._func, self._time)
        else:
            s = "%s----%s\n" % (branch, self._func)
        i = 0
        if lastChild:
            # The last child drops its parent's trailing vertical guide.
            branch = branch[:-1] + " "
        while i < len(self._children):
            if i != len(self._children) - 1:
                s += "%s" % self._children[i].__toString(branch +\
                                " |", False)
            else:
                s += "%s" % self._children[i].__toString(branch +\
                                " |", True)
            i += 1
        return s
class BrokenLineException(Exception):
    """Signals a trace line left incomplete by the pipe breakage; the
    caller stops processing and ignores that line.
    """
    pass
class CommentLineException(Exception):
    """Signals a comment line (such as the header at the beginning of
    the trace file); the caller simply skips it.
    """
    pass
def parseLine(line):
    """Split one raw ftrace line into (timestamp, callee, caller).

    Raises CommentLineException for '#' comment lines and
    BrokenLineException for lines not matching the expected format.
    """
    stripped = line.strip()
    if stripped.startswith("#"):
        raise CommentLineException
    match = re.match(r"[^]]+?\] +([0-9.]+): (\w+) <-(\w+)", stripped)
    if match is None:
        raise BrokenLineException
    return match.groups()
def main():
    """Read a raw function trace from stdin and print it as a tree.

    Parsing stops at the first broken (truncated) line and skips
    comment lines.
    """
    CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
    tree = CallTree.ROOT
    for line in sys.stdin:
        try:
            calltime, callee, caller = parseLine(line)
        except BrokenLineException:
            # Incomplete last line caused by the pipe breakage: stop.
            break
        except CommentLineException:
            continue
        # Attach the call beneath the innermost frame named `caller`.
        tree = tree.getParent(caller)
        tree = tree.calls(callee, calltime)
    # Parenthesized call form so the script also runs under Python 3;
    # under Python 2 a single parenthesized argument prints identically.
    print(CallTree.ROOT)
if __name__ == "__main__":
main()
| gpl-2.0 |
shanot/imp | tools/w32/add_search_path.py | 1 | 3669 | #!/usr/bin/env python
"""Patch IMP and RMF SWIG wrappers to search for Python extensions and DLLs
in Python version-specific directories. These directories are created by
the .exe Windows installers, and are not in the standard Python search
path, so need to be added. We need to patch IMP/__init__.py so we add
paths before any usage of any IMP module, and RMF.py too in case RMF
is imported before IMP.
Note that we used to simply use the 'patch' utility to do this, but the
SWIG folks changed the header of their output files, which confused patch.
Instead, we look for import lines, and add our code after the first block
of imports (which import standard Python modules such as 'sys').
This ensures that the search path is properly set up before we try to
import IMP/RMF extensions, but doesn't come before the comment header
or any __future__ imports (which must come first).
"""
from __future__ import print_function
import re
import sys
IMP_PATCH = r"""
def _add_pyext_to_path():
import os.path
import sys
# Get directory containing IMP's __init__.py
imp_dir = os.path.abspath(os.path.dirname(__file__))
# Make sure we're on a Windows system
if hasattr(sys, 'dllhandle') and len(imp_dir) > 4:
# Strip '\IMP' suffix from directory
pydir = imp_dir[:-4]
# Add Python version-specific directory to search path
pyextdir = pydir + '\\python%d.%d' % sys.version_info[:2]
if pyextdir not in sys.path:
sys.path.insert(1, pyextdir)
# Strip '\python\IMP' suffix to get directory containing DLLs
dlldir = imp_dir[:-11] + '\\bin'
# Add DLL directory to PATH so Windows can find them
if dlldir not in os.environ['PATH']:
os.environ['PATH'] = dlldir + ';' + os.environ['PATH']
_add_pyext_to_path()
"""
RMF_PATCH = r"""
def _add_pyext_to_path():
import os.path
import sys
# Get directory containing RMF.py
rmf_dir = os.path.abspath(os.path.dirname(__file__))
# Make sure we're on a Windows system
if hasattr(sys, 'dllhandle') and len(rmf_dir) > 4:
# Add Python version-specific directory to search path
pyextdir = rmf_dir + '\\python%d.%d' % sys.version_info[:2]
if pyextdir not in sys.path:
sys.path.insert(1, pyextdir)
# Strip '\python' suffix to get directory containing DLLs
dlldir = rmf_dir[:-7] + '\\bin'
# Add DLL directory to PATH so Windows can find them
if dlldir not in os.environ['PATH']:
os.environ['PATH'] = dlldir + ';' + os.environ['PATH']
_add_pyext_to_path()
"""
def add_search_path(filename):
    """Insert the Windows search-path patch into *filename*, in place.

    The patch body (RMF_PATCH when the filename contains 'RMF',
    IMP_PATCH otherwise) is written immediately after the first block
    of import statements. An "import block" is a run of lines starting
    with 'from' or 'import'; blank lines and comments inside the run
    are considered part of the block.
    """
    patch = RMF_PATCH if 'RMF' in filename else IMP_PATCH
    with open(filename) as fh:
        contents = fh.readlines()
    # Raw strings: the original '(\s*$|\s*#)' is an invalid escape
    # sequence and warns (eventually errors) on modern Python.
    import_re = re.compile(r'(from|import) ')
    non_statement = re.compile(r'(\s*$|\s*#)')
    in_imports = False
    imports_done = False
    with open(filename, 'w') as fh:
        for line in contents:
            if not imports_done:
                if not in_imports and import_re.match(line):
                    # First import statement: we are inside the block.
                    in_imports = True
                elif (in_imports and not import_re.match(line)
                      and not non_statement.match(line)):
                    # First real statement after the imports: inject
                    # the patch before it, exactly once.
                    fh.write(patch)
                    in_imports = False
                    imports_done = True
            fh.write(line)
def main():
    """Patch every file named on the command line."""
    for path in sys.argv[1:]:
        add_search_path(path)
if __name__ == '__main__':
main()
| gpl-3.0 |
jabdoa2/FreeCAD | src/Mod/PartDesign/Scripts/FilletArc.py | 27 | 2819 | #! python
# -*- coding: utf-8 -*-
# (c) 2010 Werner Mayer LGPL
__author__ = "Werner Mayer <wmayer[at]users.sourceforge.net>"
# Formulas:
# M2 = P + b*r2 + t*u
# S1 = (r2*M1 + r1*M2)/(r1+r2)
# S2 = M2-b*r2
import math
# 3d vector class
# 3d vector class
class Vector:
    """Minimal 3-component vector supporting the operations needed by
    the fillet computation. All binary operations return new vectors;
    only norm() mutates in place.
    """

    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.z = z

    def add(self, other):
        return Vector(self.x + other.x, self.y + other.y, self.z + other.z)

    def sub(self, other):
        return Vector(self.x - other.x, self.y - other.y, self.z - other.z)

    def dot(self, other):
        return self.x * other.x + self.y * other.y + self.z * other.z

    def mult(self, s):
        """Scale by the scalar s."""
        return Vector(self.x * s, self.y * s, self.z * s)

    def cross(self, other):
        return Vector(self.y * other.z - self.z * other.y,
                      self.z * other.x - self.x * other.z,
                      self.x * other.y - self.y * other.x)

    def length(self):
        # |v| = sqrt(v . v)
        return math.sqrt(self.dot(self))

    def norm(self):
        """Normalize in place; the zero vector is left unchanged."""
        l = self.length()
        if l > 0:
            self.x /= l
            self.y /= l
            self.z /= l

    def __repr__(self):
        return "(%f,%f,%f)" % (self.x, self.y, self.z)
# A signum function
# A signum function
def sgn(val):
    """Return -1, 0 or 1 according to the sign of val."""
    # Boolean subtraction: (True - False) == 1, etc.
    return (val > 0) - (val < 0)
# M1 ... is the center of the arc
# P ... is the end point of the arc and start point of the line
# Q .. is a second point on the line
# N ... is the normal of the plane where the arc and the line lie on, usually N=(0,0,1)
# r2 ... the fillet radius
# ccw ... counter-clockwise means which part of the arc is given. ccw must be either True or False
def makeFilletArc(M1, P, Q, N, r2, ccw):
    """Compute the fillet arc joining a circular arc and a line.

    M1  -- center of the existing arc
    P   -- end point of the arc and start point of the line
    Q   -- a second point on the line
    N   -- normal of the plane containing arc and line, usually (0,0,1)
    r2  -- fillet radius
    ccw -- True if the counter-clockwise part of the arc is given

    Returns (S1, S2, M2): M2 is the fillet center (M2 = P + b*r2 + t*u),
    S1 = (r2*M1 + r1*M2)/(r1+r2) the tangency point on the arc, and
    S2 = M2 - b*r2 the tangency point on the line.
    Raises RuntimeError when no fillet of radius r2 exists.
    """
    u = Q.sub(P)
    v = P.sub(M1)
    # b: unit normal of the line within the plane, chosen on the fillet
    # side according to the arc orientation.
    if ccw:
        b = u.cross(N)
    else:
        b = N.cross(u)
    b.norm()
    uu = u.dot(u)
    uv = u.dot(v)
    r1 = v.length()
    # distinguish between internal and external fillets
    r2 *= sgn(uv)
    # dd is the discriminant of the quadratic for the line parameter t.
    cc = 2.0 * r2 * (b.dot(v) - r1)
    dd = uv * uv - uu * cc
    if dd < 0:
        # Fixed typo in the original message ("caluclate").
        raise RuntimeError("Unable to calculate intersection points")
    t1 = (-uv + math.sqrt(dd)) / uu
    t2 = (-uv - math.sqrt(dd)) / uu
    # Pick the solution closest to P.
    if abs(t1) < abs(t2):
        t = t1
    else:
        t = t2
    # NOTE: the Python-2 debug statements "print br2" / "print ut" were
    # removed; they broke Python 3 and polluted stdout.
    br2 = b.mult(r2)
    ut = u.mult(t)
    M2 = P.add(ut).add(br2)
    S1 = M1.mult(r2 / (r1 + r2)).add(M2.mult(r1 / (r1 + r2)))
    S2 = M2.sub(br2)
    return (S1, S2, M2)
def test():
    """Visual check inside FreeCAD: build an arc and a line, compute the
    fillet between them and display the resulting circle.

    Requires a running FreeCAD environment (FreeCAD, Part modules).
    """
    from FreeCAD import Base
    import Part
    P1 = Base.Vector(1, -5, 0)
    P2 = Base.Vector(-5, 2, 0)
    P3 = Base.Vector(1, 5, 0)
    #Q=Base.Vector(5,10,0)
    #Q=Base.Vector(5,11,0)
    Q = Base.Vector(5, 0, 0)
    r2 = 3.0
    axis = Base.Vector(0, 0, 1)
    ccw = False
    arc = Part.ArcOfCircle(P1, P2, P3)
    C = arc.Center
    Part.show(Part.makeLine(P3, Q))
    Part.show(arc.toShape())
    # Bug fix: this used to call the non-existent name makeArc(),
    # raising NameError; the function defined above is makeFilletArc().
    (S1, S2, M2) = makeFilletArc(Vector(C.x, C.y, C.z),
                                 Vector(P3.x, P3.y, P3.z),
                                 Vector(Q.x, Q.y, Q.z),
                                 Vector(axis.x, axis.y, axis.z), r2, ccw)
    circle = Part.Circle(Base.Vector(M2.x, M2.y, M2.z),
                         Base.Vector(0, 0, 1), math.fabs(r2))
    Part.show(circle.toShape())
| lgpl-2.1 |
amenonsen/ansible | test/units/modules/network/fortios/test_fortios_vpn_ssl_web_user_group_bookmark.py | 21 | 7790 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_vpn_ssl_web_user_group_bookmark
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    """Auto-applied fixture that replaces the module's Connection class
    with a mock for every test in this file."""
    return mocker.patch('ansible.modules.network.fortios.fortios_vpn_ssl_web_user_group_bookmark.Connection')
fos_instance = FortiOSHandler(connection_mock)
def test_vpn_ssl_web_user_group_bookmark_creation(mocker):
    """With state 'present' and a successful set(), the module reports a
    change, no error, and passes the sanitized data to the API."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'vpn_ssl_web_user_group_bookmark': {'name': 'default_name_3'},
        'vdom': 'root'}

    is_error, changed, response = fortios_vpn_ssl_web_user_group_bookmark.fortios_vpn_ssl_web(input_data, fos_instance)

    expected_data = {'name': 'default_name_3'}

    set_method_mock.assert_called_with('vpn.ssl.web', 'user-group-bookmark', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_vpn_ssl_web_user_group_bookmark_creation_fails(mocker):
    """When set() fails with HTTP 500 the module reports an error and
    no change."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'vpn_ssl_web_user_group_bookmark': {'name': 'default_name_3'},
        'vdom': 'root'}

    is_error, changed, response = fortios_vpn_ssl_web_user_group_bookmark.fortios_vpn_ssl_web(input_data, fos_instance)

    expected_data = {'name': 'default_name_3'}

    set_method_mock.assert_called_with('vpn.ssl.web', 'user-group-bookmark', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_vpn_ssl_web_user_group_bookmark_removal(mocker):
    """With state 'absent' and a successful delete(), the module reports
    a change and no error."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)

    input_data = {
        'username': 'admin',
        'state': 'absent',
        'vpn_ssl_web_user_group_bookmark': {'name': 'default_name_3'},
        'vdom': 'root'}

    is_error, changed, response = fortios_vpn_ssl_web_user_group_bookmark.fortios_vpn_ssl_web(input_data, fos_instance)

    delete_method_mock.assert_called_with('vpn.ssl.web', 'user-group-bookmark', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_vpn_ssl_web_user_group_bookmark_deletion_fails(mocker):
    """When delete() fails with HTTP 500 the module reports an error and
    no change."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)

    input_data = {
        'username': 'admin',
        'state': 'absent',
        'vpn_ssl_web_user_group_bookmark': {'name': 'default_name_3'},
        'vdom': 'root'}

    is_error, changed, response = fortios_vpn_ssl_web_user_group_bookmark.fortios_vpn_ssl_web(input_data, fos_instance)

    delete_method_mock.assert_called_with('vpn.ssl.web', 'user-group-bookmark', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_vpn_ssl_web_user_group_bookmark_idempotent(mocker):
    """An HTTP 404 from set() is treated as 'nothing to do': neither an
    error nor a change is reported."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'vpn_ssl_web_user_group_bookmark': {'name': 'default_name_3'},
        'vdom': 'root'}

    is_error, changed, response = fortios_vpn_ssl_web_user_group_bookmark.fortios_vpn_ssl_web(input_data, fos_instance)

    expected_data = {'name': 'default_name_3'}

    set_method_mock.assert_called_with('vpn.ssl.web', 'user-group-bookmark', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_vpn_ssl_web_user_group_bookmark_filter_foreign_attributes(mocker):
    """Attributes that are not part of the module schema are stripped
    from the payload before set() is called."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'vpn_ssl_web_user_group_bookmark': {
            'random_attribute_not_valid': 'tag', 'name': 'default_name_3'},
        'vdom': 'root'}

    is_error, changed, response = fortios_vpn_ssl_web_user_group_bookmark.fortios_vpn_ssl_web(input_data, fos_instance)

    expected_data = {'name': 'default_name_3'}

    set_method_mock.assert_called_with('vpn.ssl.web', 'user-group-bookmark', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
| gpl-3.0 |
sylarcp/anita | venv/lib/python2.7/site-packages/sqlalchemy/dialects/mssql/base.py | 21 | 58663 | # mssql/base.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql
:name: Microsoft SQL Server
Auto Increment Behavior
-----------------------
SQL Server provides so-called "auto incrementing" behavior using the
``IDENTITY`` construct, which can be placed on an integer primary key.
SQLAlchemy considers ``IDENTITY`` within its default "autoincrement" behavior,
described at :paramref:`.Column.autoincrement`; this means
that by default, the first integer primary key column in a :class:`.Table`
will be considered to be the identity column and will generate DDL as such::
from sqlalchemy import Table, MetaData, Column, Integer
m = MetaData()
t = Table('t', m,
Column('id', Integer, primary_key=True),
Column('x', Integer))
m.create_all(engine)
The above example will generate DDL as:
.. sourcecode:: sql
CREATE TABLE t (
id INTEGER NOT NULL IDENTITY(1,1),
x INTEGER NULL,
PRIMARY KEY (id)
)
For the case where this default generation of ``IDENTITY`` is not desired,
specify ``autoincrement=False`` on all integer primary key columns::
m = MetaData()
t = Table('t', m,
Column('id', Integer, primary_key=True, autoincrement=False),
Column('x', Integer))
m.create_all(engine)
.. note::
An INSERT statement which refers to an explicit value for such
a column is prohibited by SQL Server, however SQLAlchemy will detect this
and modify the ``IDENTITY_INSERT`` flag accordingly at statement execution
time. As this is not a high performing process, care should be taken to
set the ``autoincrement`` flag appropriately for columns that will not
actually require IDENTITY behavior.
Controlling "Start" and "Increment"
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Specific control over the parameters of the ``IDENTITY`` value is supported
using the :class:`.schema.Sequence` object. While this object normally
represents an explicit "sequence" for supporting backends, on SQL Server it is
re-purposed to specify behavior regarding the identity column, including
support of the "start" and "increment" values::
from sqlalchemy import Table, Integer, Sequence, Column
Table('test', metadata,
Column('id', Integer,
Sequence('blah', start=100, increment=10),
primary_key=True),
Column('name', String(20))
).create(some_engine)
would yield:
.. sourcecode:: sql
CREATE TABLE test (
id INTEGER NOT NULL IDENTITY(100,10) PRIMARY KEY,
name VARCHAR(20) NULL,
)
Note that the ``start`` and ``increment`` values for sequences are
optional and will default to 1,1.
INSERT behavior
^^^^^^^^^^^^^^^^
Handling of the ``IDENTITY`` column at INSERT time involves two key
techniques. The most common is being able to fetch the "last inserted value"
for a given ``IDENTITY`` column, a process which SQLAlchemy performs
implicitly in many cases, most importantly within the ORM.
The process for fetching this value has several variants:
* In the vast majority of cases, RETURNING is used in conjunction with INSERT
statements on SQL Server in order to get newly generated primary key values:
.. sourcecode:: sql
INSERT INTO t (x) OUTPUT inserted.id VALUES (?)
* When RETURNING is not available or has been disabled via
``implicit_returning=False``, either the ``scope_identity()`` function or
the ``@@identity`` variable is used; behavior varies by backend:
* when using PyODBC, the phrase ``; select scope_identity()`` will be
appended to the end of the INSERT statement; a second result set will be
fetched in order to receive the value. Given a table as::
t = Table('t', m, Column('id', Integer, primary_key=True),
Column('x', Integer),
implicit_returning=False)
an INSERT will look like:
.. sourcecode:: sql
INSERT INTO t (x) VALUES (?); select scope_identity()
* Other dialects such as pymssql will call upon
``SELECT scope_identity() AS lastrowid`` subsequent to an INSERT
statement. If the flag ``use_scope_identity=False`` is passed to
:func:`.create_engine`, the statement ``SELECT @@identity AS lastrowid``
is used instead.
A table that contains an ``IDENTITY`` column will prohibit an INSERT statement
that refers to the identity column explicitly. The SQLAlchemy dialect will
detect when an INSERT construct, created using a core :func:`.insert`
construct (not a plain string SQL), refers to the identity column, and
in this case will emit ``SET IDENTITY_INSERT ON`` prior to the insert
statement proceeding, and ``SET IDENTITY_INSERT OFF`` subsequent to the
execution. Given this example::
m = MetaData()
t = Table('t', m, Column('id', Integer, primary_key=True),
Column('x', Integer))
m.create_all(engine)
engine.execute(t.insert(), {'id': 1, 'x':1}, {'id':2, 'x':2})
The above column will be created with IDENTITY, however the INSERT statement
we emit is specifying explicit values. In the echo output we can see
how SQLAlchemy handles this:
.. sourcecode:: sql
CREATE TABLE t (
id INTEGER NOT NULL IDENTITY(1,1),
x INTEGER NULL,
PRIMARY KEY (id)
)
COMMIT
SET IDENTITY_INSERT t ON
INSERT INTO t (id, x) VALUES (?, ?)
((1, 1), (2, 2))
SET IDENTITY_INSERT t OFF
COMMIT
This
is an auxiliary use case suitable for testing and bulk insert scenarios.
Collation Support
-----------------
Character collations are supported by the base string types,
specified by the string argument "collation"::
from sqlalchemy import VARCHAR
Column('login', VARCHAR(32, collation='Latin1_General_CI_AS'))
When such a column is associated with a :class:`.Table`, the
CREATE TABLE statement for this column will yield::
login VARCHAR(32) COLLATE Latin1_General_CI_AS NULL
.. versionadded:: 0.8 Character collations are now part of the base string
types.
LIMIT/OFFSET Support
--------------------
MSSQL has no support for the LIMIT or OFFSET keywords. LIMIT is
supported directly through the ``TOP`` Transact SQL keyword::
select.limit
will yield::
SELECT TOP n
If using SQL Server 2005 or above, LIMIT with OFFSET
support is available through the ``ROW_NUMBER OVER`` construct.
For versions below 2005, LIMIT with OFFSET usage will fail.
Nullability
-----------
MSSQL has support for three levels of column nullability. The default
nullability allows nulls and is explicit in the CREATE TABLE
construct::
name VARCHAR(20) NULL
If ``nullable=None`` is specified then no specification is made. In
other words the database's configured default is used. This will
render::
name VARCHAR(20)
If ``nullable`` is ``True`` or ``False`` then the column will be
``NULL`` or ``NOT NULL`` respectively.
Date / Time Handling
--------------------
DATE and TIME are supported. Bind parameters are converted
to datetime.datetime() objects as required by most MSSQL drivers,
and results are processed from strings if needed.
The DATE and TIME types are not available for MSSQL 2005 and
previous - if a server version below 2008 is detected, DDL
for these types will be issued as DATETIME.
.. _mssql_indexes:
Clustered Index Support
-----------------------
The MSSQL dialect supports clustered indexes (and primary keys) via the
``mssql_clustered`` option. This option is available to :class:`.Index`,
:class:`.UniqueConstraint`. and :class:`.PrimaryKeyConstraint`.
To generate a clustered index::
Index("my_index", table.c.x, mssql_clustered=True)
which renders the index as ``CREATE CLUSTERED INDEX my_index ON table (x)``.
.. versionadded:: 0.8
To generate a clustered primary key use::
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x", "y", mssql_clustered=True))
which will render the table, for example, as::
CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
PRIMARY KEY CLUSTERED (x, y))
Similarly, we can generate a clustered unique constraint using::
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x"),
UniqueConstraint("y", mssql_clustered=True),
)
.. versionadded:: 0.9.2
MSSQL-Specific Index Options
-----------------------------
In addition to clustering, the MSSQL dialect supports other special options
for :class:`.Index`.
INCLUDE
^^^^^^^
The ``mssql_include`` option renders INCLUDE(colname) for the given string
names::
Index("my_index", table.c.x, mssql_include=['y'])
would render the index as ``CREATE INDEX my_index ON table (x) INCLUDE (y)``
.. versionadded:: 0.8
Index ordering
^^^^^^^^^^^^^^
Index ordering is available via functional expressions, such as::
Index("my_index", table.c.x.desc())
would render the index as ``CREATE INDEX my_index ON table (x DESC)``
.. versionadded:: 0.8
.. seealso::
:ref:`schema_indexes_functional`
Compatibility Levels
--------------------
MSSQL supports the notion of setting compatibility levels at the
database level. This allows, for instance, to run a database that
is compatible with SQL2000 while running on a SQL2005 database
server. ``server_version_info`` will always return the database
server version information (in this case SQL2005) and not the
compatibility level information. Because of this, if running under
a backwards compatibility mode SQLAlchemy may attempt to use T-SQL
statements that are unable to be parsed by the database server.
Triggers
--------
SQLAlchemy by default uses OUTPUT INSERTED to get at newly
generated primary key values via IDENTITY columns or other
server side defaults. MS-SQL does not
allow the usage of OUTPUT INSERTED on tables that have triggers.
To disable the usage of OUTPUT INSERTED on a per-table basis,
specify ``implicit_returning=False`` for each :class:`.Table`
which has triggers::
Table('mytable', metadata,
Column('id', Integer, primary_key=True),
# ...,
implicit_returning=False
)
Declarative form::
class MyClass(Base):
# ...
__table_args__ = {'implicit_returning':False}
This option can also be specified engine-wide using the
``implicit_returning=False`` argument on :func:`.create_engine`.
Enabling Snapshot Isolation
---------------------------
Not necessarily specific to SQLAlchemy, SQL Server has a default transaction
isolation mode that locks entire tables, and causes even mildly concurrent
applications to have long held locks and frequent deadlocks.
Enabling snapshot isolation for the database as a whole is recommended
for modern levels of concurrency support. This is accomplished via the
following ALTER DATABASE commands executed at the SQL prompt::
ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON
ALTER DATABASE MyDatabase SET READ_COMMITTED_SNAPSHOT ON
Background on SQL Server snapshot isolation is available at
http://msdn.microsoft.com/en-us/library/ms175095.aspx.
Known Issues
------------
* No support for more than one ``IDENTITY`` column per table
* reflection of indexes does not work with versions older than
SQL Server 2005
"""
import datetime
import operator
import re
from ... import sql, schema as sa_schema, exc, util
from ...sql import compiler, expression, \
util as sql_util, cast
from ... import engine
from ...engine import reflection, default
from ... import types as sqltypes
from ...types import INTEGER, BIGINT, SMALLINT, DECIMAL, NUMERIC, \
FLOAT, TIMESTAMP, DATETIME, DATE, BINARY,\
VARBINARY, TEXT, VARCHAR, NVARCHAR, CHAR, NCHAR
from ...util import update_wrapper
from . import information_schema as ischema
# Major-version tuples in server_version_info form, used to gate
# version-dependent behavior (e.g. DATE/TIME DDL is 2008+; see the
# module docstring above).
MS_2008_VERSION = (10,)
MS_2005_VERSION = (9,)
MS_2000_VERSION = (8,)
# Reserved T-SQL keywords; identifiers matching any of these are
# presumably quoted by the dialect's identifier preparer (defined
# elsewhere in this module) — verify against the preparer class.
RESERVED_WORDS = set(
    ['add', 'all', 'alter', 'and', 'any', 'as', 'asc', 'authorization',
     'backup', 'begin', 'between', 'break', 'browse', 'bulk', 'by', 'cascade',
     'case', 'check', 'checkpoint', 'close', 'clustered', 'coalesce',
     'collate', 'column', 'commit', 'compute', 'constraint', 'contains',
     'containstable', 'continue', 'convert', 'create', 'cross', 'current',
     'current_date', 'current_time', 'current_timestamp', 'current_user',
     'cursor', 'database', 'dbcc', 'deallocate', 'declare', 'default',
     'delete', 'deny', 'desc', 'disk', 'distinct', 'distributed', 'double',
     'drop', 'dump', 'else', 'end', 'errlvl', 'escape', 'except', 'exec',
     'execute', 'exists', 'exit', 'external', 'fetch', 'file', 'fillfactor',
     'for', 'foreign', 'freetext', 'freetexttable', 'from', 'full',
     'function', 'goto', 'grant', 'group', 'having', 'holdlock', 'identity',
     'identity_insert', 'identitycol', 'if', 'in', 'index', 'inner', 'insert',
     'intersect', 'into', 'is', 'join', 'key', 'kill', 'left', 'like',
     'lineno', 'load', 'merge', 'national', 'nocheck', 'nonclustered', 'not',
     'null', 'nullif', 'of', 'off', 'offsets', 'on', 'open', 'opendatasource',
     'openquery', 'openrowset', 'openxml', 'option', 'or', 'order', 'outer',
     'over', 'percent', 'pivot', 'plan', 'precision', 'primary', 'print',
     'proc', 'procedure', 'public', 'raiserror', 'read', 'readtext',
     'reconfigure', 'references', 'replication', 'restore', 'restrict',
     'return', 'revert', 'revoke', 'right', 'rollback', 'rowcount',
     'rowguidcol', 'rule', 'save', 'schema', 'securityaudit', 'select',
     'session_user', 'set', 'setuser', 'shutdown', 'some', 'statistics',
     'system_user', 'table', 'tablesample', 'textsize', 'then', 'to', 'top',
     'tran', 'transaction', 'trigger', 'truncate', 'tsequal', 'union',
     'unique', 'unpivot', 'update', 'updatetext', 'use', 'user', 'values',
     'varying', 'view', 'waitfor', 'when', 'where', 'while', 'with',
     'writetext',
     ])
class REAL(sqltypes.REAL):
    """The SQL Server REAL datatype."""
    __visit_name__ = 'REAL'

    def __init__(self, **kw):
        # REAL is a synonym for FLOAT(24) on SQL server, so the
        # precision is pinned regardless of what the caller passes.
        kw['precision'] = 24
        super(REAL, self).__init__(**kw)
class TINYINT(sqltypes.Integer):
    """The MSSQL TINYINT datatype."""
    __visit_name__ = 'TINYINT'
# MSSQL DATE/TIME types have varied behavior, sometimes returning
# strings. MSDate/TIME check for everything, and always
# filter bind parameters into datetime objects (required by pyodbc,
# not sure about other dialects).
class _MSDate(sqltypes.Date):
    """DATE type that widens bound date values to datetimes (as most
    MSSQL drivers require) and parses string results back into
    ``datetime.date`` objects when needed.
    """

    def bind_processor(self, dialect):
        def process(value):
            # Exact type check on purpose: only plain dates are widened;
            # datetimes (a date subclass) and None pass through as-is.
            if type(value) == datetime.date:
                return datetime.datetime(value.year, value.month, value.day)
            return value
        return process

    _reg = re.compile(r"(\d+)-(\d+)-(\d+)")

    def result_processor(self, dialect, coltype):
        def process(value):
            if isinstance(value, datetime.datetime):
                return value.date()
            if isinstance(value, util.string_types):
                # Some drivers hand back 'YYYY-MM-DD' strings.
                year, month, day = self._reg.match(value).groups()
                return datetime.date(int(year or 0), int(month or 0),
                                     int(day or 0))
            return value
        return process
class TIME(sqltypes.TIME):
    def __init__(self, precision=None, **kwargs):
        # precision: optional fractional-seconds precision; stored here
        # for consumption elsewhere (not used by this class directly).
        self.precision = precision
        super(TIME, self).__init__()

    # Placeholder date used to widen time-of-day values into datetimes.
    # Name-mangled: accessible inside the class as self.__zero_date.
    __zero_date = datetime.date(1900, 1, 1)

    def bind_processor(self, dialect):
        def process(value):
            # Combine the time component with the 1900-01-01 placeholder;
            # values of any other type (including None) pass through.
            if isinstance(value, datetime.datetime):
                value = datetime.datetime.combine(
                    self.__zero_date, value.time())
            elif isinstance(value, datetime.time):
                value = datetime.datetime.combine(self.__zero_date, value)
            return value
        return process

    _reg = re.compile(r"(\d+):(\d+):(\d+)(?:\.(\d{0,6}))?")

    def result_processor(self, dialect, coltype):
        def process(value):
            # Results may arrive as datetimes or as 'HH:MM:SS[.ffffff]'
            # strings depending on the driver; normalize both to time.
            if isinstance(value, datetime.datetime):
                return value.time()
            elif isinstance(value, util.string_types):
                return datetime.time(*[
                    int(x or 0)
                    for x in self._reg.match(value).groups()])
            else:
                return value
        return process
_MSTime = TIME
class _DateTimeBase(object):
def bind_processor(self, dialect):
def process(value):
if type(value) == datetime.date:
return datetime.datetime(value.year, value.month, value.day)
else:
return value
return process
class _MSDateTime(_DateTimeBase, sqltypes.DateTime):
    # DateTime variant whose bind processor (from _DateTimeBase)
    # promotes plain date values to datetime.
    pass
class SMALLDATETIME(_DateTimeBase, sqltypes.DateTime):
    """The SQL Server SMALLDATETIME type."""
    __visit_name__ = 'SMALLDATETIME'
class DATETIME2(_DateTimeBase, sqltypes.DateTime):
    """The SQL Server DATETIME2 type, with optional fractional-second
    precision."""
    __visit_name__ = 'DATETIME2'
    def __init__(self, precision=None, **kw):
        super(DATETIME2, self).__init__(**kw)
        self.precision = precision
# TODO: is this not an Interval ?
class DATETIMEOFFSET(sqltypes.TypeEngine):
    """The SQL Server DATETIMEOFFSET type."""
    __visit_name__ = 'DATETIMEOFFSET'
    def __init__(self, precision=None, **kwargs):
        # precision: optional fractional-second precision, rendered by
        # MSTypeCompiler.visit_DATETIMEOFFSET
        self.precision = precision
class _StringType(object):
    """Base for MSSQL string types."""
    def __init__(self, collation=None):
        # collation: optional COLLATE value, rendered by the type
        # compiler (see MSTypeCompiler._extend)
        super(_StringType, self).__init__(collation=collation)
class NTEXT(sqltypes.UnicodeText):
    """MSSQL NTEXT type, for variable-length unicode text up to 2^30
    characters."""
    # rendered via MSTypeCompiler.visit_NTEXT
    __visit_name__ = 'NTEXT'
class IMAGE(sqltypes.LargeBinary):
    """The SQL Server IMAGE type (large binary data)."""
    __visit_name__ = 'IMAGE'
class BIT(sqltypes.TypeEngine):
    """The SQL Server BIT type; used to represent booleans."""
    __visit_name__ = 'BIT'
class MONEY(sqltypes.TypeEngine):
    """The SQL Server MONEY type."""
    __visit_name__ = 'MONEY'
class SMALLMONEY(sqltypes.TypeEngine):
    """The SQL Server SMALLMONEY type."""
    __visit_name__ = 'SMALLMONEY'
class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
    """The SQL Server UNIQUEIDENTIFIER type."""
    __visit_name__ = "UNIQUEIDENTIFIER"
class SQL_VARIANT(sqltypes.TypeEngine):
    """The SQL Server SQL_VARIANT type."""
    __visit_name__ = 'SQL_VARIANT'
# old names.
# MS-prefixed synonyms kept for backwards compatibility; new code
# should use the uppercase type names defined above.
MSDateTime = _MSDateTime
MSDate = _MSDate
MSReal = REAL
MSTinyInteger = TINYINT
MSTime = TIME
MSSmallDateTime = SMALLDATETIME
MSDateTime2 = DATETIME2
MSDateTimeOffset = DATETIMEOFFSET
MSText = TEXT
MSNText = NTEXT
MSString = VARCHAR
MSNVarchar = NVARCHAR
MSChar = CHAR
MSNChar = NCHAR
MSBinary = BINARY
MSVarBinary = VARBINARY
MSImage = IMAGE
MSBit = BIT
MSMoney = MONEY
MSSmallMoney = SMALLMONEY
MSUniqueIdentifier = UNIQUEIDENTIFIER
MSVariant = SQL_VARIANT
# Reflection lookup: maps type names reported by the server during
# table reflection to the type classes above (see MSDialect.get_columns).
ischema_names = {
    'int': INTEGER,
    'bigint': BIGINT,
    'smallint': SMALLINT,
    'tinyint': TINYINT,
    'varchar': VARCHAR,
    'nvarchar': NVARCHAR,
    'char': CHAR,
    'nchar': NCHAR,
    'text': TEXT,
    'ntext': NTEXT,
    'decimal': DECIMAL,
    'numeric': NUMERIC,
    'float': FLOAT,
    'datetime': DATETIME,
    'datetime2': DATETIME2,
    'datetimeoffset': DATETIMEOFFSET,
    'date': DATE,
    'time': TIME,
    'smalldatetime': SMALLDATETIME,
    'binary': BINARY,
    'varbinary': VARBINARY,
    'bit': BIT,
    'real': REAL,
    'image': IMAGE,
    'timestamp': TIMESTAMP,
    'money': MONEY,
    'smallmoney': SMALLMONEY,
    'uniqueidentifier': UNIQUEIDENTIFIER,
    'sql_variant': SQL_VARIANT,
}
class MSTypeCompiler(compiler.GenericTypeCompiler):
    """Renders SQL Server DDL type expressions, including COLLATE
    clauses, '(max)' lengths, and version-dependent date/time types."""
    def _extend(self, spec, type_, length=None):
        """Extend a string-type declaration with standard SQL
        COLLATE annotations.
        """
        if getattr(type_, 'collation', None):
            collation = 'COLLATE %s' % type_.collation
        else:
            collation = None
        if not length:
            length = type_.length
        if length:
            spec = spec + "(%s)" % length
        return ' '.join([c for c in (spec, collation)
                         if c is not None])
    def visit_FLOAT(self, type_):
        precision = getattr(type_, 'precision', None)
        if precision is None:
            return "FLOAT"
        else:
            return "FLOAT(%(precision)s)" % {'precision': precision}
    def visit_TINYINT(self, type_):
        return "TINYINT"
    def visit_DATETIMEOFFSET(self, type_):
        if type_.precision:
            return "DATETIMEOFFSET(%s)" % type_.precision
        else:
            return "DATETIMEOFFSET"
    def visit_TIME(self, type_):
        precision = getattr(type_, 'precision', None)
        if precision:
            return "TIME(%s)" % precision
        else:
            return "TIME"
    def visit_DATETIME2(self, type_):
        precision = getattr(type_, 'precision', None)
        if precision:
            return "DATETIME2(%s)" % precision
        else:
            return "DATETIME2"
    def visit_SMALLDATETIME(self, type_):
        return "SMALLDATETIME"
    def visit_unicode(self, type_):
        return self.visit_NVARCHAR(type_)
    def visit_unicode_text(self, type_):
        return self.visit_NTEXT(type_)
    def visit_NTEXT(self, type_):
        return self._extend("NTEXT", type_)
    def visit_TEXT(self, type_):
        return self._extend("TEXT", type_)
    def visit_VARCHAR(self, type_):
        # a length of None renders as VARCHAR(max)
        return self._extend("VARCHAR", type_, length=type_.length or 'max')
    def visit_CHAR(self, type_):
        return self._extend("CHAR", type_)
    def visit_NCHAR(self, type_):
        return self._extend("NCHAR", type_)
    def visit_NVARCHAR(self, type_):
        return self._extend("NVARCHAR", type_, length=type_.length or 'max')
    def visit_date(self, type_):
        # below SQL Server 2008, render DATETIME instead of DATE
        if self.dialect.server_version_info < MS_2008_VERSION:
            return self.visit_DATETIME(type_)
        else:
            return self.visit_DATE(type_)
    def visit_time(self, type_):
        # below SQL Server 2008, render DATETIME instead of TIME
        if self.dialect.server_version_info < MS_2008_VERSION:
            return self.visit_DATETIME(type_)
        else:
            return self.visit_TIME(type_)
    def visit_large_binary(self, type_):
        return self.visit_IMAGE(type_)
    def visit_IMAGE(self, type_):
        return "IMAGE"
    def visit_VARBINARY(self, type_):
        # a length of None renders as VARBINARY(max)
        return self._extend(
            "VARBINARY",
            type_,
            length=type_.length or 'max')
    def visit_boolean(self, type_):
        return self.visit_BIT(type_)
    def visit_BIT(self, type_):
        return "BIT"
    def visit_MONEY(self, type_):
        return "MONEY"
    def visit_SMALLMONEY(self, type_):
        return 'SMALLMONEY'
    def visit_UNIQUEIDENTIFIER(self, type_):
        return "UNIQUEIDENTIFIER"
    def visit_SQL_VARIANT(self, type_):
        return 'SQL_VARIANT'
class MSExecutionContext(default.DefaultExecutionContext):
    """Execution context for SQL Server.

    Toggles IDENTITY_INSERT around INSERT statements that supply an
    explicit value for the table's autoincrement column, and retrieves
    the last inserted identity via scope_identity() / @@identity.
    """
    _enable_identity_insert = False
    _select_lastrowid = False
    _result_proxy = None
    _lastrowid = None

    def _opt_encode(self, statement):
        # encode the statement to bytes when the DBAPI cannot accept
        # unicode statements
        if not self.dialect.supports_unicode_statements:
            return self.dialect._encoder(statement)[0]
        else:
            return statement

    def pre_exec(self):
        """Activate IDENTITY_INSERT if needed."""
        if self.isinsert:
            tbl = self.compiled.statement.table
            seq_column = tbl._autoincrement_column
            insert_has_sequence = seq_column is not None
            if insert_has_sequence:
                # IDENTITY_INSERT is required whenever an explicit value
                # for the identity column appears, either in the compiled
                # parameters or in the statement's own parameters
                self._enable_identity_insert = \
                    seq_column.key in self.compiled_parameters[0] or \
                    (
                        self.compiled.statement.parameters and (
                            (
                                self.compiled.statement._has_multi_parameters
                                and
                                seq_column.key in
                                self.compiled.statement.parameters[0]
                            ) or (
                                not
                                self.compiled.statement._has_multi_parameters
                                and
                                seq_column.key in
                                self.compiled.statement.parameters
                            )
                        )
                    )
            else:
                self._enable_identity_insert = False
            # fetch the new identity afterwards only when the server is
            # generating it and RETURNING/executemany aren't in play
            self._select_lastrowid = insert_has_sequence and \
                not self.compiled.returning and \
                not self._enable_identity_insert and \
                not self.executemany
            if self._enable_identity_insert:
                self.root_connection._cursor_execute(
                    self.cursor,
                    self._opt_encode(
                        "SET IDENTITY_INSERT %s ON" %
                        self.dialect.identifier_preparer.format_table(tbl)),
                    (),
                    self)

    def post_exec(self):
        """Disable IDENTITY_INSERT if enabled."""
        conn = self.root_connection
        if self._select_lastrowid:
            if self.dialect.use_scope_identity:
                conn._cursor_execute(
                    self.cursor,
                    "SELECT scope_identity() AS lastrowid", (), self)
            else:
                conn._cursor_execute(self.cursor,
                                     "SELECT @@identity AS lastrowid",
                                     (),
                                     self)
            # fetchall() ensures the cursor is consumed without closing it
            row = self.cursor.fetchall()[0]
            self._lastrowid = int(row[0])
        if (self.isinsert or self.isupdate or self.isdelete) and \
                self.compiled.returning:
            self._result_proxy = engine.FullyBufferedResultProxy(self)
        if self._enable_identity_insert:
            conn._cursor_execute(
                self.cursor,
                self._opt_encode(
                    "SET IDENTITY_INSERT %s OFF" %
                    self.dialect.identifier_preparer.format_table(
                        self.compiled.statement.table)),
                (),
                self)

    def get_lastrowid(self):
        # identity value captured in post_exec(), if any
        return self._lastrowid

    def handle_dbapi_exception(self, e):
        # best-effort cleanup: switch IDENTITY_INSERT back off if the
        # statement failed mid-flight
        if self._enable_identity_insert:
            try:
                self.cursor.execute(
                    self._opt_encode(
                        "SET IDENTITY_INSERT %s OFF" %
                        self.dialect.identifier_preparer.format_table(
                            self.compiled.statement.table)))
            # was a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit; errors from the cleanup are
            # still deliberately ignored so the original DBAPI
            # exception propagates
            except Exception:
                pass

    def get_result_proxy(self):
        if self._result_proxy:
            return self._result_proxy
        else:
            return engine.ResultProxy(self)
class MSSQLCompiler(compiler.SQLCompiler):
    """SQL statement compiler for SQL Server: TOP/ROW_NUMBER() paging,
    OUTPUT clauses, schema-qualified table aliasing, and T-SQL
    function/operator spellings."""
    returning_precedes_values = True
    extract_map = util.update_copy(
        compiler.SQLCompiler.extract_map,
        {
            'doy': 'dayofyear',
            'dow': 'weekday',
            'milliseconds': 'millisecond',
            'microseconds': 'microsecond'
        })
    def __init__(self, *args, **kwargs):
        # table -> alias mapping built up by _schema_aliased_table()
        self.tablealiases = {}
        super(MSSQLCompiler, self).__init__(*args, **kwargs)
    def visit_now_func(self, fn, **kw):
        return "CURRENT_TIMESTAMP"
    def visit_current_date_func(self, fn, **kw):
        return "GETDATE()"
    def visit_length_func(self, fn, **kw):
        return "LEN%s" % self.function_argspec(fn, **kw)
    def visit_char_length_func(self, fn, **kw):
        return "LEN%s" % self.function_argspec(fn, **kw)
    def visit_concat_op_binary(self, binary, operator, **kw):
        # string concatenation is "+" on SQL Server
        return "%s + %s" % \
            (self.process(binary.left, **kw),
             self.process(binary.right, **kw))
    def visit_true(self, expr, **kw):
        return '1'
    def visit_false(self, expr, **kw):
        return '0'
    def visit_match_op_binary(self, binary, operator, **kw):
        return "CONTAINS (%s, %s)" % (
            self.process(binary.left, **kw),
            self.process(binary.right, **kw))
    def get_select_precolumns(self, select):
        """ MS-SQL puts TOP, its version of LIMIT, here """
        if select._distinct or select._limit is not None:
            s = select._distinct and "DISTINCT " or ""
            # ODBC drivers and possibly others
            # don't support bind params in the SELECT clause on SQL Server.
            # so have to use literal here.
            if select._limit is not None:
                if not select._offset:
                    s += "TOP %d " % select._limit
            return s
        return compiler.SQLCompiler.get_select_precolumns(self, select)
    def get_from_hint_text(self, table, text):
        return text
    def get_crud_hint_text(self, table, text):
        return text
    def limit_clause(self, select):
        # Limit in mssql is after the select keyword
        return ""
    def visit_select(self, select, **kwargs):
        """Look for ``LIMIT`` and OFFSET in a select statement, and if
        so tries to wrap it in a subquery with ``row_number()`` criterion.
        """
        if select._offset and not getattr(select, '_mssql_visit', None):
            # to use ROW_NUMBER(), an ORDER BY is required.
            if not select._order_by_clause.clauses:
                raise exc.CompileError('MSSQL requires an order_by when '
                                       'using an offset.')
            _offset = select._offset
            _limit = select._limit
            _order_by_clauses = select._order_by_clause.clauses
            # _mssql_visit marks the generated inner select so this
            # branch is not re-entered when it is compiled
            select = select._generate()
            select._mssql_visit = True
            select = select.column(
                sql.func.ROW_NUMBER().over(order_by=_order_by_clauses)
                .label("mssql_rn")
            ).order_by(None).alias()
            mssql_rn = sql.column('mssql_rn')
            limitselect = sql.select([c for c in select.c if
                                      c.key != 'mssql_rn'])
            limitselect.append_whereclause(mssql_rn > _offset)
            if _limit is not None:
                limitselect.append_whereclause(mssql_rn <= (_limit + _offset))
            return self.process(limitselect, iswrapper=True, **kwargs)
        else:
            return compiler.SQLCompiler.visit_select(self, select, **kwargs)
    def _schema_aliased_table(self, table):
        # return (creating if needed) an alias for a schema-qualified
        # table; None for unqualified tables
        if getattr(table, 'schema', None) is not None:
            if table not in self.tablealiases:
                self.tablealiases[table] = table.alias()
            return self.tablealiases[table]
        else:
            return None
    def visit_table(self, table, mssql_aliased=False, iscrud=False, **kwargs):
        if mssql_aliased is table or iscrud:
            return super(MSSQLCompiler, self).visit_table(table, **kwargs)
        # alias schema-qualified tables
        alias = self._schema_aliased_table(table)
        if alias is not None:
            return self.process(alias, mssql_aliased=table, **kwargs)
        else:
            return super(MSSQLCompiler, self).visit_table(table, **kwargs)
    def visit_alias(self, alias, **kwargs):
        # translate for schema-qualified table aliases
        kwargs['mssql_aliased'] = alias.original
        return super(MSSQLCompiler, self).visit_alias(alias, **kwargs)
    def visit_extract(self, extract, **kw):
        field = self.extract_map.get(extract.field, extract.field)
        return 'DATEPART("%s", %s)' % \
            (field, self.process(extract.expr, **kw))
    def visit_savepoint(self, savepoint_stmt):
        return "SAVE TRANSACTION %s" % \
            self.preparer.format_savepoint(savepoint_stmt)
    def visit_rollback_to_savepoint(self, savepoint_stmt):
        return ("ROLLBACK TRANSACTION %s"
                % self.preparer.format_savepoint(savepoint_stmt))
    def visit_column(self, column, add_to_result_map=None, **kwargs):
        if column.table is not None and \
                (not self.isupdate and not self.isdelete) or \
                self.is_subquery():
            # translate for schema-qualified table aliases
            t = self._schema_aliased_table(column.table)
            if t is not None:
                converted = expression._corresponding_column_or_error(
                    t, column)
                if add_to_result_map is not None:
                    add_to_result_map(
                        column.name,
                        column.name,
                        (column, column.name, column.key),
                        column.type
                    )
                return super(MSSQLCompiler, self).\
                    visit_column(converted, **kwargs)
        return super(MSSQLCompiler, self).visit_column(
            column, add_to_result_map=add_to_result_map, **kwargs)
    def visit_binary(self, binary, **kwargs):
        """Move bind parameters to the right-hand side of an operator, where
        possible.
        """
        if (
            isinstance(binary.left, expression.BindParameter)
            and binary.operator == operator.eq
            and not isinstance(binary.right, expression.BindParameter)
        ):
            return self.process(
                expression.BinaryExpression(binary.right,
                                            binary.left,
                                            binary.operator),
                **kwargs)
        return super(MSSQLCompiler, self).visit_binary(binary, **kwargs)
    def returning_clause(self, stmt, returning_cols):
        # RETURNING is rendered as an OUTPUT clause against the
        # "inserted" / "deleted" pseudo-tables
        if self.isinsert or self.isupdate:
            target = stmt.table.alias("inserted")
        else:
            target = stmt.table.alias("deleted")
        adapter = sql_util.ClauseAdapter(target)
        columns = [
            self._label_select_column(None, adapter.traverse(c),
                                      True, False, {})
            for c in expression._select_iterables(returning_cols)
        ]
        return 'OUTPUT ' + ', '.join(columns)
    def get_cte_preamble(self, recursive):
        # SQL Server finds it too inconvenient to accept
        # an entirely optional, SQL standard specified,
        # "RECURSIVE" word with their "WITH",
        # so here we go
        return "WITH"
    def label_select_column(self, select, column, asfrom):
        if isinstance(column, expression.Function):
            return column.label(None)
        else:
            return super(MSSQLCompiler, self).\
                label_select_column(select, column, asfrom)
    def for_update_clause(self, select):
        # "FOR UPDATE" is only allowed on "DECLARE CURSOR" which
        # SQLAlchemy doesn't use
        return ''
    def order_by_clause(self, select, **kw):
        order_by = self.process(select._order_by_clause, **kw)
        # MSSQL only allows ORDER BY in subqueries if there is a LIMIT
        if order_by and (not self.is_subquery() or select._limit):
            return " ORDER BY " + order_by
        else:
            return ""
    def update_from_clause(self, update_stmt,
                           from_table, extra_froms,
                           from_hints,
                           **kw):
        """Render the UPDATE..FROM clause specific to MSSQL.
        In MSSQL, if the UPDATE statement involves an alias of the table to
        be updated, then the table itself must be added to the FROM list as
        well. Otherwise, it is optional. Here, we add it regardless.
        """
        return "FROM " + ', '.join(
            t._compiler_dispatch(self, asfrom=True,
                                 fromhints=from_hints, **kw)
            for t in [from_table] + extra_froms)
class MSSQLStrictCompiler(MSSQLCompiler):
    """A subclass of MSSQLCompiler which disables the usage of bind
    parameters where not allowed natively by MS-SQL.

    A dialect may use this compiler on a platform where native
    binds are used.
    """
    ansi_bind_rules = True

    def visit_in_op_binary(self, binary, operator, **kw):
        # IN lists must be rendered as literals, not binds
        kw['literal_binds'] = True
        lhs = self.process(binary.left, **kw)
        rhs = self.process(binary.right, **kw)
        return "%s IN %s" % (lhs, rhs)

    def visit_notin_op_binary(self, binary, operator, **kw):
        # NOT IN lists must be rendered as literals, not binds
        kw['literal_binds'] = True
        lhs = self.process(binary.left, **kw)
        rhs = self.process(binary.right, **kw)
        return "%s NOT IN %s" % (lhs, rhs)

    def render_literal_value(self, value, type_):
        """
        For date and datetime values, convert to a string
        format acceptable to MSSQL. That seems to be the
        so-called ODBC canonical date format which looks
        like this:
        yyyy-mm-dd hh:mi:ss.mmm(24h)
        For other data types, call the base class implementation.
        """
        # datetime and date are both subclasses of datetime.date
        if issubclass(type(value), datetime.date):
            # SQL Server wants single quotes around the date string.
            return "'%s'" % value
        return super(MSSQLStrictCompiler, self).render_literal_value(
            value, type_)
class MSDDLCompiler(compiler.DDLCompiler):
    """DDL compiler for SQL Server: IDENTITY columns, CLUSTERED
    indexes/constraints, and INCLUDE columns on indexes."""
    def get_column_specification(self, column, **kwargs):
        colspec = (self.preparer.format_column(column) + " "
                   + self.dialect.type_compiler.process(column.type))
        if column.nullable is not None:
            if not column.nullable or column.primary_key or \
                    isinstance(column.default, sa_schema.Sequence):
                colspec += " NOT NULL"
            else:
                colspec += " NULL"
        if column.table is None:
            raise exc.CompileError(
                "mssql requires Table-bound columns "
                "in order to generate DDL")
        # install an IDENTITY Sequence if we have either a sequence or an
        # implicit IDENTITY column
        if isinstance(column.default, sa_schema.Sequence):
            # an explicit start of 0 must be preserved ("0 or 1" would
            # otherwise turn it into 1
            if column.default.start == 0:
                start = 0
            else:
                start = column.default.start or 1
            colspec += " IDENTITY(%s,%s)" % (start,
                                             column.default.increment or 1)
        elif column is column.table._autoincrement_column:
            colspec += " IDENTITY(1,1)"
        else:
            default = self.get_column_default_string(column)
            if default is not None:
                colspec += " DEFAULT " + default
        return colspec
    def visit_create_index(self, create, include_schema=False):
        index = create.element
        self._verify_index_table(index)
        preparer = self.preparer
        text = "CREATE "
        if index.unique:
            text += "UNIQUE "
        # handle clustering option
        if index.dialect_options['mssql']['clustered']:
            text += "CLUSTERED "
        text += "INDEX %s ON %s (%s)" \
            % (
                self._prepared_index_name(index,
                                          include_schema=include_schema),
                preparer.format_table(index.table),
                ', '.join(
                    self.sql_compiler.process(expr,
                                              include_table=False,
                                              literal_binds=True) for
                    expr in index.expressions)
            )
        # handle other included columns
        if index.dialect_options['mssql']['include']:
            inclusions = [index.table.c[col]
                          if isinstance(col, util.string_types) else col
                          for col in
                          index.dialect_options['mssql']['include']
                          ]
            text += " INCLUDE (%s)" \
                % ', '.join([preparer.quote(c.name)
                             for c in inclusions])
        return text
    def visit_drop_index(self, drop):
        return "\nDROP INDEX %s ON %s" % (
            self._prepared_index_name(drop.element, include_schema=False),
            self.preparer.format_table(drop.element.table)
        )
    def visit_primary_key_constraint(self, constraint):
        if len(constraint) == 0:
            return ''
        text = ""
        if constraint.name is not None:
            text += "CONSTRAINT %s " % \
                self.preparer.format_constraint(constraint)
        text += "PRIMARY KEY "
        if constraint.dialect_options['mssql']['clustered']:
            text += "CLUSTERED "
        text += "(%s)" % ', '.join(self.preparer.quote(c.name)
                                   for c in constraint)
        text += self.define_constraint_deferrability(constraint)
        return text
    def visit_unique_constraint(self, constraint):
        if len(constraint) == 0:
            return ''
        text = ""
        if constraint.name is not None:
            text += "CONSTRAINT %s " % \
                self.preparer.format_constraint(constraint)
        text += "UNIQUE "
        if constraint.dialect_options['mssql']['clustered']:
            text += "CLUSTERED "
        text += "(%s)" % ', '.join(self.preparer.quote(c.name)
                                   for c in constraint)
        text += self.define_constraint_deferrability(constraint)
        return text
class MSIdentifierPreparer(compiler.IdentifierPreparer):
    """Identifier preparer quoting with SQL Server's square brackets."""
    reserved_words = RESERVED_WORDS
    def __init__(self, dialect):
        super(MSIdentifierPreparer, self).__init__(dialect, initial_quote='[',
                                                   final_quote=']')
    def _escape_identifier(self, value):
        # no escaping applied inside bracket quoting
        return value
    def quote_schema(self, schema, force=None):
        """Prepare a quoted table and schema name."""
        # quote each dotted component individually
        result = '.'.join([self.quote(x, force) for x in schema.split('.')])
        return result
def _db_plus_owner_listing(fn):
    """Decorator for list-style reflection methods: splits ``schema``
    into (dbname, owner) and invokes ``fn`` with the connection
    temporarily switched to that database (see _switch_db)."""
    def wrap(dialect, connection, schema=None, **kw):
        dbname, owner = _owner_plus_db(dialect, schema)
        return _switch_db(dbname, connection, fn, dialect, connection,
                          dbname, owner, schema, **kw)
    return update_wrapper(wrap, fn)
def _db_plus_owner(fn):
    """Decorator for per-table reflection methods: splits ``schema``
    into (dbname, owner) and invokes ``fn`` with the connection
    temporarily switched to that database (see _switch_db)."""
    def wrap(dialect, connection, tablename, schema=None, **kw):
        dbname, owner = _owner_plus_db(dialect, schema)
        return _switch_db(dbname, connection, fn, dialect, connection,
                          tablename, dbname, owner, schema, **kw)
    return update_wrapper(wrap, fn)
def _switch_db(dbname, connection, fn, *arg, **kw):
if dbname:
current_db = connection.scalar("select db_name()")
connection.execute("use %s" % dbname)
try:
return fn(*arg, **kw)
finally:
if dbname:
connection.execute("use %s" % current_db)
def _owner_plus_db(dialect, schema):
if not schema:
return None, dialect.default_schema_name
elif "." in schema:
return schema.split(".", 1)
else:
return None, schema
class MSDialect(default.DefaultDialect):
    """Base dialect for Microsoft SQL Server: version-dependent feature
    setup plus schema reflection via the information schema and sys
    catalogs."""
    name = 'mssql'
    supports_default_values = True
    supports_empty_insert = False
    execution_ctx_cls = MSExecutionContext
    use_scope_identity = True
    max_identifier_length = 128
    schema_name = "dbo"
    colspecs = {
        sqltypes.DateTime: _MSDateTime,
        sqltypes.Date: _MSDate,
        sqltypes.Time: TIME,
    }
    ischema_names = ischema_names
    supports_native_boolean = False
    supports_unicode_binds = True
    postfetch_lastrowid = True
    server_version_info = ()
    statement_compiler = MSSQLCompiler
    ddl_compiler = MSDDLCompiler
    type_compiler = MSTypeCompiler
    preparer = MSIdentifierPreparer
    construct_arguments = [
        (sa_schema.PrimaryKeyConstraint, {
            "clustered": False
        }),
        (sa_schema.UniqueConstraint, {
            "clustered": False
        }),
        (sa_schema.Index, {
            "clustered": False,
            "include": None
        })
    ]
    def __init__(self,
                 query_timeout=None,
                 use_scope_identity=True,
                 max_identifier_length=None,
                 schema_name="dbo", **opts):
        self.query_timeout = int(query_timeout or 0)
        self.schema_name = schema_name
        self.use_scope_identity = use_scope_identity
        self.max_identifier_length = int(max_identifier_length or 0) or \
            self.max_identifier_length
        super(MSDialect, self).__init__(**opts)
    def do_savepoint(self, connection, name):
        # give the DBAPI a push
        connection.execute("IF @@TRANCOUNT = 0 BEGIN TRANSACTION")
        super(MSDialect, self).do_savepoint(connection, name)
    def do_release_savepoint(self, connection, name):
        # SQL Server does not support RELEASE SAVEPOINT
        pass
    def initialize(self, connection):
        super(MSDialect, self).initialize(connection)
        if self.server_version_info[0] not in list(range(8, 17)):
            # FreeTDS with version 4.2 seems to report here
            # a number like "95.10.255". Don't know what
            # that is. So emit warning.
            util.warn(
                "Unrecognized server version info '%s'. Version specific "
                "behaviors may not function properly. If using ODBC "
                "with FreeTDS, ensure server version 7.0 or 8.0, not 4.2, "
                "is configured in the FreeTDS configuration." %
                ".".join(str(x) for x in self.server_version_info))
        if self.server_version_info >= MS_2005_VERSION and \
                'implicit_returning' not in self.__dict__:
            self.implicit_returning = True
        if self.server_version_info >= MS_2008_VERSION:
            self.supports_multivalues_insert = True
    def _get_default_schema_name(self, connection):
        # pre-2005 servers don't expose sys.database_principals
        if self.server_version_info < MS_2005_VERSION:
            return self.schema_name
        query = sql.text("""
        SELECT default_schema_name FROM
        sys.database_principals
        WHERE principal_id=database_principal_id()
        """)
        default_schema_name = connection.scalar(query)
        if default_schema_name is not None:
            return util.text_type(default_schema_name)
        else:
            return self.schema_name
    @_db_plus_owner
    def has_table(self, connection, tablename, dbname, owner, schema):
        columns = ischema.columns
        whereclause = columns.c.table_name == tablename
        if owner:
            whereclause = sql.and_(whereclause,
                                   columns.c.table_schema == owner)
        s = sql.select([columns], whereclause)
        c = connection.execute(s)
        return c.first() is not None
    @reflection.cache
    def get_schema_names(self, connection, **kw):
        s = sql.select([ischema.schemata.c.schema_name],
                       order_by=[ischema.schemata.c.schema_name]
                       )
        schema_names = [r[0] for r in connection.execute(s)]
        return schema_names
    @reflection.cache
    @_db_plus_owner_listing
    def get_table_names(self, connection, dbname, owner, schema, **kw):
        tables = ischema.tables
        s = sql.select([tables.c.table_name],
                       sql.and_(
                           tables.c.table_schema == owner,
                           tables.c.table_type == 'BASE TABLE'
                       ),
                       order_by=[tables.c.table_name]
                       )
        table_names = [r[0] for r in connection.execute(s)]
        return table_names
    @reflection.cache
    @_db_plus_owner_listing
    def get_view_names(self, connection, dbname, owner, schema, **kw):
        tables = ischema.tables
        s = sql.select([tables.c.table_name],
                       sql.and_(
                           tables.c.table_schema == owner,
                           tables.c.table_type == 'VIEW'
                       ),
                       order_by=[tables.c.table_name]
                       )
        view_names = [r[0] for r in connection.execute(s)]
        return view_names
    @reflection.cache
    @_db_plus_owner
    def get_indexes(self, connection, tablename, dbname, owner, schema, **kw):
        # using system catalogs, don't support index reflection
        # below MS 2005
        if self.server_version_info < MS_2005_VERSION:
            return []
        rp = connection.execute(
            sql.text("select ind.index_id, ind.is_unique, ind.name "
                     "from sys.indexes as ind join sys.tables as tab on "
                     "ind.object_id=tab.object_id "
                     "join sys.schemas as sch on sch.schema_id=tab.schema_id "
                     "where tab.name = :tabname "
                     "and sch.name=:schname "
                     "and ind.is_primary_key=0",
                     bindparams=[
                         sql.bindparam('tabname', tablename,
                                       sqltypes.String(convert_unicode=True)),
                         sql.bindparam('schname', owner,
                                       sqltypes.String(convert_unicode=True))
                     ],
                     typemap={
                         'name': sqltypes.Unicode()
                     }
                     )
        )
        indexes = {}
        for row in rp:
            indexes[row['index_id']] = {
                'name': row['name'],
                'unique': row['is_unique'] == 1,
                'column_names': []
            }
        # second pass: attach member columns to each index
        rp = connection.execute(
            sql.text(
                "select ind_col.index_id, ind_col.object_id, col.name "
                "from sys.columns as col "
                "join sys.tables as tab on tab.object_id=col.object_id "
                "join sys.index_columns as ind_col on "
                "(ind_col.column_id=col.column_id and "
                "ind_col.object_id=tab.object_id) "
                "join sys.schemas as sch on sch.schema_id=tab.schema_id "
                "where tab.name=:tabname "
                "and sch.name=:schname",
                bindparams=[
                    sql.bindparam('tabname', tablename,
                                  sqltypes.String(convert_unicode=True)),
                    sql.bindparam('schname', owner,
                                  sqltypes.String(convert_unicode=True))
                ],
                typemap={'name': sqltypes.Unicode()}
            ),
        )
        for row in rp:
            if row['index_id'] in indexes:
                indexes[row['index_id']]['column_names'].append(row['name'])
        return list(indexes.values())
    @reflection.cache
    @_db_plus_owner
    def get_view_definition(self, connection, viewname,
                            dbname, owner, schema, **kw):
        rp = connection.execute(
            sql.text(
                "select definition from sys.sql_modules as mod, "
                "sys.views as views, "
                "sys.schemas as sch"
                " where "
                "mod.object_id=views.object_id and "
                "views.schema_id=sch.schema_id and "
                "views.name=:viewname and sch.name=:schname",
                bindparams=[
                    sql.bindparam('viewname', viewname,
                                  sqltypes.String(convert_unicode=True)),
                    sql.bindparam('schname', owner,
                                  sqltypes.String(convert_unicode=True))
                ]
            )
        )
        if rp:
            view_def = rp.scalar()
            return view_def
    @reflection.cache
    @_db_plus_owner
    def get_columns(self, connection, tablename, dbname, owner, schema, **kw):
        # Get base columns
        columns = ischema.columns
        if owner:
            whereclause = sql.and_(columns.c.table_name == tablename,
                                   columns.c.table_schema == owner)
        else:
            whereclause = columns.c.table_name == tablename
        s = sql.select([columns], whereclause,
                       order_by=[columns.c.ordinal_position])
        c = connection.execute(s)
        cols = []
        while True:
            row = c.fetchone()
            if row is None:
                break
            (name, type, nullable, charlen,
             numericprec, numericscale, default, collation) = (
                row[columns.c.column_name],
                row[columns.c.data_type],
                row[columns.c.is_nullable] == 'YES',
                row[columns.c.character_maximum_length],
                row[columns.c.numeric_precision],
                row[columns.c.numeric_scale],
                row[columns.c.column_default],
                row[columns.c.collation_name]
            )
            coltype = self.ischema_names.get(type, None)
            kwargs = {}
            if coltype in (MSString, MSChar, MSNVarchar, MSNChar, MSText,
                           MSNText, MSBinary, MSVarBinary,
                           sqltypes.LargeBinary):
                kwargs['length'] = charlen
                if collation:
                    kwargs['collation'] = collation
                # charlen of -1 indicates an unbounded ("max") length
                if coltype == MSText or \
                        (coltype in (MSString, MSNVarchar) and charlen == -1):
                    kwargs.pop('length')
            if coltype is None:
                util.warn(
                    "Did not recognize type '%s' of column '%s'" %
                    (type, name))
                coltype = sqltypes.NULLTYPE
            else:
                if issubclass(coltype, sqltypes.Numeric) and \
                        coltype is not MSReal:
                    kwargs['scale'] = numericscale
                    kwargs['precision'] = numericprec
                coltype = coltype(**kwargs)
            cdict = {
                'name': name,
                'type': coltype,
                'nullable': nullable,
                'default': default,
                'autoincrement': False,
            }
            cols.append(cdict)
        # autoincrement and identity
        colmap = {}
        for col in cols:
            colmap[col['name']] = col
        # We also run an sp_columns to check for identity columns:
        cursor = connection.execute("sp_columns @table_name = '%s', "
                                    "@table_owner = '%s'"
                                    % (tablename, owner))
        ic = None
        while True:
            row = cursor.fetchone()
            if row is None:
                break
            (col_name, type_name) = row[3], row[5]
            if type_name.endswith("identity") and col_name in colmap:
                ic = col_name
                colmap[col_name]['autoincrement'] = True
                colmap[col_name]['sequence'] = dict(
                    name='%s_identity' % col_name)
                break
        cursor.close()
        if ic is not None and self.server_version_info >= MS_2005_VERSION:
            table_fullname = "%s.%s" % (owner, tablename)
            cursor = connection.execute(
                "select ident_seed('%s'), ident_incr('%s')"
                % (table_fullname, table_fullname)
            )
            row = cursor.first()
            if row is not None and row[0] is not None:
                colmap[ic]['sequence'].update({
                    'start': int(row[0]),
                    'increment': int(row[1])
                })
        return cols
    @reflection.cache
    @_db_plus_owner
    def get_pk_constraint(self, connection, tablename,
                          dbname, owner, schema, **kw):
        pkeys = []
        TC = ischema.constraints
        C = ischema.key_constraints.alias('C')
        # Primary key constraints
        s = sql.select([C.c.column_name,
                        TC.c.constraint_type,
                        C.c.constraint_name],
                       sql.and_(TC.c.constraint_name == C.c.constraint_name,
                                TC.c.table_schema == C.c.table_schema,
                                C.c.table_name == tablename,
                                C.c.table_schema == owner)
                       )
        c = connection.execute(s)
        constraint_name = None
        for row in c:
            if 'PRIMARY' in row[TC.c.constraint_type.name]:
                pkeys.append(row[0])
                if constraint_name is None:
                    constraint_name = row[C.c.constraint_name.name]
        return {'constrained_columns': pkeys, 'name': constraint_name}
    @reflection.cache
    @_db_plus_owner
    def get_foreign_keys(self, connection, tablename,
                         dbname, owner, schema, **kw):
        RR = ischema.ref_constraints
        C = ischema.key_constraints.alias('C')
        R = ischema.key_constraints.alias('R')
        # Foreign key constraints
        s = sql.select([C.c.column_name,
                        R.c.table_schema, R.c.table_name, R.c.column_name,
                        RR.c.constraint_name, RR.c.match_option,
                        RR.c.update_rule,
                        RR.c.delete_rule],
                       sql.and_(C.c.table_name == tablename,
                                C.c.table_schema == owner,
                                C.c.constraint_name == RR.c.constraint_name,
                                R.c.constraint_name ==
                                RR.c.unique_constraint_name,
                                C.c.ordinal_position == R.c.ordinal_position
                                ),
                       order_by=[RR.c.constraint_name, R.c.ordinal_position]
                       )
        # group rows by constraint ID, to handle multi-column FKs
        fkeys = []
        fknm, scols, rcols = (None, [], [])
        def fkey_rec():
            return {
                'name': None,
                'constrained_columns': [],
                'referred_schema': None,
                'referred_table': None,
                'referred_columns': []
            }
        fkeys = util.defaultdict(fkey_rec)
        for r in connection.execute(s).fetchall():
            scol, rschema, rtbl, rcol, rfknm, fkmatch, fkuprule, fkdelrule = r
            rec = fkeys[rfknm]
            rec['name'] = rfknm
            if not rec['referred_table']:
                rec['referred_table'] = rtbl
                if schema is not None or owner != rschema:
                    if dbname:
                        rschema = dbname + "." + rschema
                    rec['referred_schema'] = rschema
            local_cols, remote_cols = \
                rec['constrained_columns'],\
                rec['referred_columns']
            local_cols.append(scol)
            remote_cols.append(rcol)
        return list(fkeys.values())
| mit |
pacoqueen/bbinn | PyChart-1.39/pychart/fill_style.py | 12 | 10044 | #
# Copyright (C) 2000-2005 by Yasushi Saito (yasushi.saito@gmail.com)
#
# Jockey is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any
# later version.
#
# Jockey is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
import pychart_util
import color
import line_style
import chart_object
import object_set
import types
import theme
import fill_style_doc
from pychart_types import *
from scaling import *
_keys = {
"bgcolor" : (color.T, color.white, "The background color."),
"line_style": (line_style.T, line_style.default,
pychart_util.line_desc),
"line_interval": (NumberType, 3,
"The interval between successive stitch lines.")
}
class T(chart_object.T):
    __doc__ = fill_style_doc.doc
    keys = _keys
    ##AUTOMATICALLY GENERATED
    ##END AUTOMATICALLY GENERATED
    def __str__(self):
        """Return the exported name of this style if it is one of the
        interned standards; otherwise a debug representation."""
        known_name = name_table().lookup(self)
        if known_name:
            return known_name
        return "<fillstyle: bg=%s line=%s interval=%s>" % (
            self.bgcolor, self.line_style, self.line_interval)
class Plain(T):
    """Fills the region with the solid background color ``bgcolor`` only.

    The ``line_style`` and ``line_interval`` attributes are ignored;
    draw() is a no-op because a plain fill has no hatch pattern to
    stroke (the background itself is presumably painted by the caller --
    confirm against the canvas code)."""
    def draw(self, can, x1, y1, x2, y2):
        pass
class Diag(T):
    """Fills the region with 45-degree diagonal lines rising to the
    upper right."""
    def draw(self, can, x1, y1, x2, y2):
        # Spacing measured along the x axis; 1.414 ~ sqrt(2) keeps the
        # perpendicular gap between 45-degree strokes equal to
        # line_interval.
        step = self.line_interval * 1.414
        # Pad the box by the pen width so strokes reach the very edges.
        pad = self.line_style.width
        x1, y1 = x1 - pad, y1 - pad
        x2, y2 = x2 + pad, y2 + pad
        span = max(y2 - y1, x2 - x1)
        # Start far enough left that lines entering from below still
        # cross the box.
        x = x1 - span
        while x < x2:
            can.line(self.line_style, x, y1, x + span, y1 + span)
            x += step
class Rdiag(T):
    """Fills the region with diagonal lines, but tilted in the opposite
    direction from fill_style.Diag."""
    def draw(self, can, x1, y1, x2, y2):
        # See Diag.draw: same geometry, mirrored slope.
        step = self.line_interval * 1.414
        pad = self.line_style.width
        x1, y1 = x1 - pad, y1 - pad
        x2, y2 = x2 + pad, y2 + pad
        span = max(y2 - y1, x2 - x1)
        x = x1
        # Run past the right edge so lines leaving toward the lower
        # left still cross the box.
        while x < x2 + span:
            can.line(self.line_style, x, y1, x - span, y1 + span)
            x += step
class Vert(T):
    """Fills the region with vertical lines spaced line_interval apart."""
    def draw(self, can, x1, y1, x2, y2):
        step = self.line_interval
        x = x1
        # Sweep left to right, one full-height stroke per step.
        while x < x2:
            can.line(self.line_style, x, y1, x, y2)
            x += step
class Horiz(T):
    """Fills the region with horizontal lines spaced line_interval apart."""
    def draw(self, can, x1, y1, x2, y2):
        step = self.line_interval
        y = y1
        # Sweep bottom to top, one full-width stroke per step.
        while y < y2:
            can.line(self.line_style, x1, y, x2, y)
            y += step
class Stitch(T):
    """Fills the region with a grid of horizontal and vertical lines."""
    def draw(self, can, x1, y1, x2, y2):
        step = self.line_interval
        # Horizontal pass...
        y = y1
        while y < y2:
            can.line(self.line_style, x1, y, x2, y)
            y += step
        # ...then the vertical pass over the same region.
        x = x1
        while x < x2:
            can.line(self.line_style, x, y1, x, y2)
            x += step
class Wave(T):
    """Fills the region with horizontal wavy (zig-zag) lines."""
    def draw(self, can, x1, y1, x2, y2):
        # This style strokes raw canvas paths, so the box corners are
        # run through the scaling helpers up front (presumably chart ->
        # device coordinates -- confirm against the scaling module).
        x1 = xscale(x1)
        x2 = xscale(x2)
        y1 = yscale(y1)
        y2 = yscale(y2)
        line_width = nscale(self.line_style.width)
        interval = nscale(self.line_interval)
        can.set_line_style(self.line_style)
        # Widen horizontally by the pen width so strokes reach the edges.
        x1 -= line_width
        x2 += line_width
        cury = y1
        half = interval/2.0
        while cury < y2:
            curx = x1
            can.newpath()
            can.moveto(curx, cury)
            # One zig-zag per interval: up-right to the midpoint, then
            # down-right back to the baseline.
            while curx < x2:
                can.lineto(curx + half, cury + half)
                can.lineto(curx + interval, cury)
                curx += interval
            can.stroke()
            cury += interval
class Vwave(T):
    """Fills the region with vertical wavy (zig-zag) lines."""
    def draw(self, can, x1, y1, x2, y2):
        # Mirror image of Wave.draw: scale to device coordinates, then
        # stroke one zig-zag path per vertical column.
        x1 = xscale(x1)
        x2 = xscale(x2)
        y1 = yscale(y1)
        y2 = yscale(y2)
        line_width = nscale(self.line_style.width)
        interval = nscale(self.line_interval)
        can.set_line_style(self.line_style)
        # Lengthen vertically by the pen width so strokes reach the edges.
        y1 -= line_width
        y2 += line_width
        curx = x1
        half = interval/2.0
        while curx < x2:
            cury = y1
            can.newpath()
            can.moveto(curx, cury)
            # One zig-zag per interval: right-up to the midpoint, then
            # back to the column line.
            while cury < y2:
                can.lineto(curx + half, cury + half)
                can.lineto(curx, cury + interval)
                cury += interval
            can.stroke()
            curx += interval
class Lines(T):
    """Fills the region with rows of short dash-like line segments,
    staggered brick-style on alternating rows."""
    def draw(self, can, x1, y1, x2, y2):
        # NOTE(review): unlike Vert/Horiz/Stitch this passes the
        # interval through nscale() while still calling can.line() with
        # unscaled box coordinates -- confirm whether that mix is
        # intentional.
        interval = nscale(self.line_interval)
        cury = y1
        j = 0
        while cury < y2:
            curx = x1
            # Offset every other row by half a segment for the stagger.
            if j % 2 == 1:
                curx += interval/2.0
            # Each segment is interval/2 long with an interval gap.
            while curx < x2:
                can.line(self.line_style, curx, cury, curx+interval/2.0, cury)
                curx += interval * 1.5
            j += 1
            cury += interval
default = Plain()
color_standards = object_set.T()
grayscale_standards = object_set.T()
def _intern_both(style):
    """Register *style* in both the color and the grayscale standard
    sets, then hand it back so it can be bound to a module name."""
    # .add() mutates the sets in place, so no `global` declaration is
    # required.
    color_standards.add(style)
    grayscale_standards.add(style)
    return style
def _intern_color(style):
    """Register *style* as a color-output standard only and return it."""
    color_standards.add(style)
    return style
def _intern_grayscale(style):
    """Register *style* as a grayscale-output standard only and return it."""
    grayscale_standards.add(style)
    return style
black = _intern_both(Plain(bgcolor=color.gray_scale(0.0), line_style=None))
red = _intern_color(Plain(bgcolor=color.red))
darkseagreen = _intern_color(Plain(bgcolor=color.darkseagreen))
blue = _intern_color(Plain(bgcolor=color.blue))
aquamarine1 = _intern_color(Plain(bgcolor=color.aquamarine1))
gray70 = _intern_both(Plain(bgcolor=color.gray70, line_style=None))
brown = _intern_color(Plain(bgcolor=color.brown))
darkorchid = _intern_color(Plain(bgcolor=color.darkorchid))
diag = _intern_both(Diag(line_style=line_style.T(cap_style=2)))
green = _intern_color(Plain(bgcolor=color.green))
gray50 = _intern_both(Plain(bgcolor=color.gray50, line_style=None))
white = _intern_both(Plain(bgcolor=color.gray_scale(1.0), line_style=None))
goldenrod = _intern_color(Plain(bgcolor=color.goldenrod))
rdiag = _intern_both(Rdiag(line_style=line_style.T(cap_style=2)))
vert = _intern_both(Vert(line_interval=1.8))
gray30 = _intern_both(Plain(bgcolor=color.gray30, line_style=None))
gray20 = _intern_both(Plain(bgcolor=color.gray20, line_style=None))
gray10 = _intern_both(Plain(bgcolor=color.gray10, line_style=None))
diag2 = _intern_both(Diag(line_style=line_style.T(width=3, cap_style=2),
line_interval=6))
rdiag2 = _intern_both(Rdiag(line_style=line_style.T(width=3, cap_style=2),
line_interval=6))
yellow = _intern_color(Plain(bgcolor=color.yellow))
diag3 = _intern_both(Diag(line_style=line_style.T(width=3, color=color.gray50, cap_style=2),
line_interval=6))
horiz = _intern_both(Horiz(line_interval=1.8))
gray90 = _intern_both(Plain(bgcolor=color.gray90, line_style=None))
rdiag3 = _intern_both(Rdiag(line_style=line_style.T(width=3,
color=color.gray50,
cap_style=2),
line_interval=6))
wave = _intern_both(Wave(line_style=line_style.T(cap_style=2, join_style=1)))
vwave = _intern_both(Vwave(line_style=line_style.T(cap_style=2, join_style=1)))
stitch = _intern_both(Stitch(line_style=line_style.T(cap_style=2, join_style=1)))
lines = _intern_both(Lines(line_style=line_style.T()))
diag_fine = _intern_both(Diag(line_style=line_style.T(width=0.75,cap_style=2),
line_interval = 1.5))
diag2_fine = _intern_both(Diag(line_style=line_style.T(width=0.75, cap_style=2),
line_interval=1.5))
diag3_fine = _intern_both(Diag(line_style=line_style.T(width=0.75,
color = color.gray50,
cap_style=2),
line_interval=1.5))
rdiag_fine = _intern_both(Rdiag(line_style=line_style.T(width=0.75,cap_style=2),
line_interval = 1.5))
rdiag2_fine = _intern_both(Rdiag(line_style=line_style.T(width=0.75, cap_style=2),
line_interval=1.5))
rdiag3_fine = _intern_both(Rdiag(line_style=line_style.T(width=0.75,
color = color.gray50,
cap_style=2),
line_interval=1.5))
horiz_fine = _intern_both(Horiz(line_interval=1.5))
vert_fine = _intern_both(Vert(line_interval=1.5))
#
# Fill styles for color charts.
#
standards = None
_name_table = None
def init():
    """Select the active standard style set from theme.use_color.

    Runs once at import time and again via theme's reinitialization
    hooks whenever the theme changes (see the hook registration at the
    bottom of this module)."""
    global standards, _name_table
    if theme.use_color:
        standards = color_standards
    else:
        standards = grayscale_standards
    # Drop the cached name table; name_table() rebuilds it on demand.
    _name_table = None
def name_table():
    """Return the table mapping interned fill-style objects back to
    their exported module-level names, building it lazily on first use
    (or after init() invalidates it)."""
    global _name_table
    if not _name_table:
        _name_table = pychart_util.symbol_lookup_table(globals(), standards)
    return _name_table
init()
theme.add_reinitialization_hook(init)
| gpl-2.0 |
mbillard/grid-framework | node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/mac_tool.py | 377 | 19309 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions to perform Xcode-style build steps.
These functions are executed via gyp-mac-tool when using the Makefile generator.
"""
import fcntl
import fnmatch
import glob
import json
import os
import plistlib
import re
import shutil
import string
import subprocess
import sys
import tempfile
def main(args):
  """Run the requested tool command; exit with its status if it
  produced one."""
  status = MacTool().Dispatch(args)
  if status is not None:
    sys.exit(status)
class MacTool(object):
"""This class performs all the Mac tooling steps. The methods can either be
executed directly, or dispatched from an argument list."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
  def _CommandifyName(self, name_string):
    """Transforms a tool name like copy-info-plist to CopyInfoPlist"""
    # str.title() capitalizes the first letter of each hyphen-separated
    # word ("copy-info-plist" -> "Copy-Info-Plist"); stripping the
    # hyphens then yields the CamelCase Exec* method suffix.
    return name_string.title().replace('-', '')
  def ExecCopyBundleResource(self, source, dest):
    """Copies a resource file to the bundle/Resources directory, performing any
    necessary compilation on each resource (XIB/storyboard compilation,
    .strings re-encoding); anything else is copied verbatim."""
    extension = os.path.splitext(source)[1].lower()
    if os.path.isdir(source):
      # Copy tree.
      # TODO(thakis): This copies file attributes like mtime, while the
      # single-file branch below doesn't. This should probably be changed to
      # be consistent with the single-file branch.
      if os.path.exists(dest):
        shutil.rmtree(dest)
      shutil.copytree(source, dest)
    elif extension == '.xib':
      return self._CopyXIBFile(source, dest)
    elif extension == '.storyboard':
      # Storyboards compile with the same ibtool invocation as XIBs.
      return self._CopyXIBFile(source, dest)
    elif extension == '.strings':
      self._CopyStringsFile(source, dest)
    else:
      # Plain byte copy (no metadata) for everything else.
      shutil.copy(source, dest)
def _CopyXIBFile(self, source, dest):
"""Compiles a XIB file with ibtool into a binary plist in the bundle."""
# ibtool sometimes crashes with relative paths. See crbug.com/314728.
base = os.path.dirname(os.path.realpath(__file__))
if os.path.relpath(source):
source = os.path.join(base, source)
if os.path.relpath(dest):
dest = os.path.join(base, dest)
args = ['xcrun', 'ibtool', '--errors', '--warnings', '--notices',
'--output-format', 'human-readable-text', '--compile', dest, source]
ibtool_section_re = re.compile(r'/\*.*\*/')
ibtool_re = re.compile(r'.*note:.*is clipping its content')
ibtoolout = subprocess.Popen(args, stdout=subprocess.PIPE)
current_section_header = None
for line in ibtoolout.stdout:
if ibtool_section_re.match(line):
current_section_header = line
elif not ibtool_re.match(line):
if current_section_header:
sys.stdout.write(current_section_header)
current_section_header = None
sys.stdout.write(line)
return ibtoolout.returncode
  def _CopyStringsFile(self, source, dest):
    """Copies a .strings file to |dest|, re-encoding its content into
    UTF-16 via Python codecs. Invalid plists are skipped silently."""
    input_code = self._DetectInputEncoding(source) or "UTF-8"
    # Xcode's CpyCopyStringsFile / builtin-copyStrings seems to call
    # CFPropertyListCreateFromXMLData() behind the scenes; at least it prints
    #     CFPropertyListCreateFromXMLData(): Old-style plist parser: missing
    #     semicolon in dictionary.
    # on invalid files. Do the same kind of validation.
    import CoreFoundation
    s = open(source, 'rb').read()
    d = CoreFoundation.CFDataCreate(None, s, len(s))
    _, error = CoreFoundation.CFPropertyListCreateFromXMLData(None, d, 0, None)
    if error:
      # Invalid plist: skip the copy, mirroring Xcode's own tool.
      return
    fp = open(dest, 'wb')
    fp.write(s.decode(input_code).encode('UTF-16'))
    fp.close()
def _DetectInputEncoding(self, file_name):
"""Reads the first few bytes from file_name and tries to guess the text
encoding. Returns None as a guess if it can't detect it."""
fp = open(file_name, 'rb')
try:
header = fp.read(3)
except e:
fp.close()
return None
fp.close()
if header.startswith("\xFE\xFF"):
return "UTF-16"
elif header.startswith("\xFF\xFE"):
return "UTF-16"
elif header.startswith("\xEF\xBB\xBF"):
return "UTF-8"
else:
return None
  def ExecCopyInfoPlist(self, source, dest, *keys):
    """Copies the |source| Info.plist to the destination directory |dest|,
    merging in extra key/value pairs (|keys|, a JSON dict as the first
    vararg), expanding ${ENVVAR}-style references, and finally writing the
    sibling PkgInfo file."""
    # Read the source Info.plist into memory.
    fd = open(source, 'r')
    lines = fd.read()
    fd.close()
    # Insert synthesized key/value pairs (e.g. BuildMachineOSBuild).
    plist = plistlib.readPlistFromString(lines)
    if keys:
      plist = dict(plist.items() + json.loads(keys[0]).items())
    lines = plistlib.writePlistToString(plist)
    # Go through all the environment variables and replace them as variables in
    # the file.
    IDENT_RE = re.compile('[/\s]')
    for key in os.environ:
      # Private variables (leading underscore) are never substituted.
      if key.startswith('_'):
        continue
      evar = '${%s}' % key
      evalue = os.environ[key]
      lines = string.replace(lines, evar, evalue)
      # Xcode supports various suffices on environment variables, which are
      # all undocumented. :rfc1034identifier is used in the standard project
      # template these days, and :identifier was used earlier. They are used to
      # convert non-url characters into things that look like valid urls --
      # except that the replacement character for :identifier, '_' isn't valid
      # in a URL either -- oops, hence :rfc1034identifier was born.
      evar = '${%s:identifier}' % key
      evalue = IDENT_RE.sub('_', os.environ[key])
      lines = string.replace(lines, evar, evalue)
      evar = '${%s:rfc1034identifier}' % key
      evalue = IDENT_RE.sub('-', os.environ[key])
      lines = string.replace(lines, evar, evalue)
    # Remove any keys with values that haven't been replaced.
    lines = lines.split('\n')
    for i in range(len(lines)):
      if lines[i].strip().startswith("<string>${"):
        # Drop the unreplaced <string> line and the preceding <key> line.
        lines[i] = None
        lines[i - 1] = None
    lines = '\n'.join(filter(lambda x: x is not None, lines))
    # Write out the file with variables replaced.
    fd = open(dest, 'w')
    fd.write(lines)
    fd.close()
    # Now write out PkgInfo file now that the Info.plist file has been
    # "compiled".
    self._WritePkgInfo(dest)
  def _WritePkgInfo(self, info_plist):
    """This writes the PkgInfo file from the data stored in Info.plist.

    The file is only produced for application bundles (CFBundlePackageType
    'APPL') and lives next to the Info.plist."""
    plist = plistlib.readPlist(info_plist)
    if not plist:
      return
    # Only create PkgInfo for executable types.
    package_type = plist['CFBundlePackageType']
    if package_type != 'APPL':
      return
    # The format of PkgInfo is eight characters, representing the bundle type
    # and bundle signature, each four characters. If that is missing, four
    # '?' characters are used instead.
    signature_code = plist.get('CFBundleSignature', '????')
    if len(signature_code) != 4:  # Wrong length resets everything, too.
      signature_code = '?' * 4
    dest = os.path.join(os.path.dirname(info_plist), 'PkgInfo')
    fp = open(dest, 'w')
    fp.write('%s%s' % (package_type, signature_code))
    fp.close()
  def ExecFlock(self, lockfile, *cmd_list):
    """Emulates the most basic behavior of Linux's flock(1): take an
    exclusive lock on |lockfile|, then run |cmd_list| and return its
    exit status."""
    # Rely on exception handling to report errors.
    fd = os.open(lockfile, os.O_RDONLY|os.O_NOCTTY|os.O_CREAT, 0o666)
    fcntl.flock(fd, fcntl.LOCK_EX)
    # The fd is left open on purpose: the lock is held until this
    # process exits, i.e. for the full runtime of the child command.
    return subprocess.call(cmd_list)
  def ExecFilterLibtool(self, *cmd_list):
    """Calls libtool and filters out '/path/to/libtool: file: foo.o has no
    symbols'. Only stderr is filtered; stdout passes through untouched."""
    libtool_re = re.compile(r'^.*libtool: file: .* has no symbols$')
    libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE)
    _, err = libtoolout.communicate()
    for line in err.splitlines():
      if not libtool_re.match(line):
        print >>sys.stderr, line
    # communicate() has waited for the process, so returncode is set.
    return libtoolout.returncode
  def ExecPackageFramework(self, framework, version):
    """Takes a path to Something.framework and the Current version of that and
    sets up all the symlinks (Versions/Current, and the root-level binary
    and Resources links)."""
    # Find the name of the binary based on the part before the ".framework".
    binary = os.path.basename(framework).split('.')[0]
    CURRENT = 'Current'
    RESOURCES = 'Resources'
    VERSIONS = 'Versions'
    if not os.path.exists(os.path.join(framework, VERSIONS, version, binary)):
      # Binary-less frameworks don't seem to contain symlinks (see e.g.
      # chromium's out/Debug/org.chromium.Chromium.manifest/ bundle).
      return
    # Move into the framework directory to set the symlinks correctly.
    # NOTE(review): not exception-safe -- if a _Relink call raises, the
    # process is left chdir'd into the framework.
    pwd = os.getcwd()
    os.chdir(framework)
    # Set up the Current version.
    self._Relink(version, os.path.join(VERSIONS, CURRENT))
    # Set up the root symlinks.
    self._Relink(os.path.join(VERSIONS, CURRENT, binary), binary)
    self._Relink(os.path.join(VERSIONS, CURRENT, RESOURCES), RESOURCES)
    # Back to where we were before!
    os.chdir(pwd)
  def _Relink(self, dest, link):
    """Creates a symlink to |dest| named |link|. If |link| already exists,
    it is overwritten."""
    # lexists() (not exists()) so a pre-existing dangling symlink is
    # also detected and replaced.
    if os.path.lexists(link):
      os.remove(link)
    os.symlink(dest, link)
  def ExecCodeSignBundle(self, key, resource_rules, entitlements, provisioning):
    """Code sign a bundle.

    This function tries to code sign an iOS bundle, following the same
    algorithm as Xcode:
      1. copy ResourceRules.plist from the user or the SDK into the bundle,
      2. pick the provisioning profile that best match the bundle identifier,
         and copy it into the bundle as embedded.mobileprovision,
      3. copy Entitlements.plist from user or SDK next to the bundle,
      4. code sign the bundle.
    """
    resource_rules_path = self._InstallResourceRules(resource_rules)
    substitutions, overrides = self._InstallProvisioningProfile(
        provisioning, self._GetCFBundleIdentifier())
    entitlements_path = self._InstallEntitlements(
        entitlements, substitutions, overrides)
    # check_call raises CalledProcessError on a non-zero codesign status.
    subprocess.check_call([
        'codesign', '--force', '--sign', key, '--resource-rules',
        resource_rules_path, '--entitlements', entitlements_path,
        os.path.join(
            os.environ['TARGET_BUILD_DIR'],
            os.environ['FULL_PRODUCT_NAME'])])
  def _InstallResourceRules(self, resource_rules):
    """Installs ResourceRules.plist from user or SDK into the bundle.

    Args:
      resource_rules: string, optional, path to the ResourceRules.plist file
        to use, default to "${SDKROOT}/ResourceRules.plist"
    Returns:
      Path to the copy of ResourceRules.plist into the bundle.
    """
    source_path = resource_rules
    target_path = os.path.join(
        os.environ['BUILT_PRODUCTS_DIR'],
        os.environ['CONTENTS_FOLDER_PATH'],
        'ResourceRules.plist')
    if not source_path:
      # Fall back to the SDK's default rules file.
      source_path = os.path.join(
          os.environ['SDKROOT'], 'ResourceRules.plist')
    shutil.copy2(source_path, target_path)
    return target_path
  def _InstallProvisioningProfile(self, profile, bundle_identifier):
    """Installs embedded.mobileprovision into the bundle.

    Args:
      profile: string, optional, short name of the .mobileprovision file
        to use, if empty or the file is missing, the best file installed
        will be used
      bundle_identifier: string, value of CFBundleIdentifier from Info.plist
    Returns:
      A tuple containing two dictionary: variables substitutions and values
      to overrides when generating the entitlements file.
    """
    source_path, provisioning_data, team_id = self._FindProvisioningProfile(
        profile, bundle_identifier)
    target_path = os.path.join(
        os.environ['BUILT_PRODUCTS_DIR'],
        os.environ['CONTENTS_FOLDER_PATH'],
        'embedded.mobileprovision')
    shutil.copy2(source_path, target_path)
    # AppIdentifierPrefix conventionally ends with a '.' separator.
    substitutions = self._GetSubstitutions(bundle_identifier, team_id + '.')
    return substitutions, provisioning_data['Entitlements']
  def _FindProvisioningProfile(self, profile, bundle_identifier):
    """Finds the .mobileprovision file to use for signing the bundle.

    Checks all the installed provisioning profiles (or if the user specified
    the PROVISIONING_PROFILE variable, only consult it) and select the most
    specific that correspond to the bundle identifier.

    Args:
      profile: string, optional, short name of the .mobileprovision file
        to use, if empty or the file is missing, the best file installed
        will be used
      bundle_identifier: string, value of CFBundleIdentifier from Info.plist
    Returns:
      A tuple of the path to the selected provisioning profile, the data of
      the embedded plist in the provisioning profile and the team identifier
      to use for code signing.
    Raises:
      SystemExit: if no .mobileprovision can be used to sign the bundle.
    """
    profiles_dir = os.path.join(
        os.environ['HOME'], 'Library', 'MobileDevice', 'Provisioning Profiles')
    if not os.path.isdir(profiles_dir):
      print >>sys.stderr, (
          'cannot find mobile provisioning for %s' % bundle_identifier)
      sys.exit(1)
    provisioning_profiles = None
    if profile:
      # Prefer the explicitly requested profile when it exists on disk.
      profile_path = os.path.join(profiles_dir, profile + '.mobileprovision')
      if os.path.exists(profile_path):
        provisioning_profiles = [profile_path]
    if not provisioning_profiles:
      provisioning_profiles = glob.glob(
          os.path.join(profiles_dir, '*.mobileprovision'))
    valid_provisioning_profiles = {}
    for profile_path in provisioning_profiles:
      profile_data = self._LoadProvisioningProfile(profile_path)
      app_id_pattern = profile_data.get(
          'Entitlements', {}).get('application-identifier', '')
      for team_identifier in profile_data.get('TeamIdentifier', []):
        # The application-identifier entitlement is a glob pattern like
        # "TEAMID.com.example.*"; match it against this bundle's id.
        app_id = '%s.%s' % (team_identifier, bundle_identifier)
        if fnmatch.fnmatch(app_id, app_id_pattern):
          valid_provisioning_profiles[app_id_pattern] = (
              profile_path, profile_data, team_identifier)
    if not valid_provisioning_profiles:
      print >>sys.stderr, (
          'cannot find mobile provisioning for %s' % bundle_identifier)
      sys.exit(1)
    # If the user has multiple provisioning profiles installed that can be
    # used for ${bundle_identifier}, pick the most specific one (ie. the
    # provisioning profile whose pattern is the longest).
    selected_key = max(valid_provisioning_profiles, key=lambda v: len(v))
    return valid_provisioning_profiles[selected_key]
  def _LoadProvisioningProfile(self, profile_path):
    """Extracts the plist embedded in a provisioning profile.

    Args:
      profile_path: string, path to the .mobileprovision file
    Returns:
      Content of the plist embedded in the provisioning profile as a dictionary.
    """
    # `security cms -D` strips the CMS signature envelope, leaving the
    # bare plist in the temporary file.
    with tempfile.NamedTemporaryFile() as temp:
      subprocess.check_call([
          'security', 'cms', '-D', '-i', profile_path, '-o', temp.name])
      return self._LoadPlistMaybeBinary(temp.name)
  def _LoadPlistMaybeBinary(self, plist_path):
    """Loads into a memory a plist possibly encoded in binary format.

    This is a wrapper around plistlib.readPlist that tries to convert the
    plist to the XML format if it can't be parsed (assuming that it is in
    the binary format).

    Args:
      plist_path: string, path to a plist file, in XML or binary format
    Returns:
      Content of the plist as a dictionary.
    """
    try:
      # First, try to read the file using plistlib that only supports XML,
      # and if an exception is raised, convert a temporary copy to XML and
      # load that copy.
      return plistlib.readPlist(plist_path)
    except:
      # NOTE(review): bare except deliberately treats *any* parse
      # failure as "probably binary" and falls through to plutil.
      pass
    with tempfile.NamedTemporaryFile() as temp:
      shutil.copy2(plist_path, temp.name)
      subprocess.check_call(['plutil', '-convert', 'xml1', temp.name])
      return plistlib.readPlist(temp.name)
  def _GetSubstitutions(self, bundle_identifier, app_identifier_prefix):
    """Constructs a dictionary of variable substitutions for Entitlements.plist.

    Args:
      bundle_identifier: string, value of CFBundleIdentifier from Info.plist
      app_identifier_prefix: string, value for AppIdentifierPrefix
    Returns:
      Dictionary of substitutions to apply when generating Entitlements.plist.
    """
    return {
      'CFBundleIdentifier': bundle_identifier,
      'AppIdentifierPrefix': app_identifier_prefix,
    }
  def _GetCFBundleIdentifier(self):
    """Extracts CFBundleIdentifier value from Info.plist in the bundle.

    Returns:
      Value of CFBundleIdentifier in the Info.plist located in the bundle.
    """
    # The Info.plist location is taken from the Xcode build environment.
    info_plist_path = os.path.join(
        os.environ['TARGET_BUILD_DIR'],
        os.environ['INFOPLIST_PATH'])
    info_plist_data = self._LoadPlistMaybeBinary(info_plist_path)
    return info_plist_data['CFBundleIdentifier']
  def _InstallEntitlements(self, entitlements, substitutions, overrides):
    """Generates and install the ${BundleName}.xcent entitlements file.

    Expands variables "$(variable)" pattern in the source entitlements file,
    add extra entitlements defined in the .mobileprovision file and the copy
    the generated plist to "${BundlePath}.xcent".

    Args:
      entitlements: string, optional, path to the Entitlements.plist template
        to use, defaults to "${SDKROOT}/Entitlements.plist"
      substitutions: dictionary, variable substitutions
      overrides: dictionary, values to add to the entitlements
    Returns:
      Path to the generated entitlements file.
    """
    source_path = entitlements
    target_path = os.path.join(
        os.environ['BUILT_PRODUCTS_DIR'],
        os.environ['PRODUCT_NAME'] + '.xcent')
    if not source_path:
      # Fall back to the SDK's template.
      source_path = os.path.join(
          os.environ['SDKROOT'],
          'Entitlements.plist')
    shutil.copy2(source_path, target_path)
    data = self._LoadPlistMaybeBinary(target_path)
    data = self._ExpandVariables(data, substitutions)
    if overrides:
      # Profile-provided entitlements never clobber template values.
      for key in overrides:
        if key not in data:
          data[key] = overrides[key]
    plistlib.writePlist(data, target_path)
    return target_path
  def _ExpandVariables(self, data, substitutions):
    """Expands variables "$(variable)" in data.

    Args:
      data: object, can be either string, list or dictionary
      substitutions: dictionary, variable substitutions to perform
    Returns:
      Copy of data where each references to "$(variable)" has been replaced
      by the corresponding value found in substitutions, or left intact if
      the key was not found.
    """
    # NOTE(review): under Python 2 the isinstance(data, str) check does
    # not match `unicode` strings -- confirm plistlib only ever hands
    # back byte strings here.
    if isinstance(data, str):
      for key, value in substitutions.iteritems():
        data = data.replace('$(%s)' % key, value)
      return data
    if isinstance(data, list):
      return [self._ExpandVariables(v, substitutions) for v in data]
    if isinstance(data, dict):
      # Values are expanded recursively; keys are left untouched.
      return dict((k, self._ExpandVariables(data[k],
                                            substitutions)) for k in data)
    return data
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| mit |
openhatch/oh-mainline | vendor/packages/django-invitation/invitation/views.py | 15 | 3050 | from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.views.generic import TemplateView

from registration.forms import RegistrationForm
from registration.views import register as registration_register

from invitation.forms import InvitationKeyForm
from invitation.models import InvitationKey
is_key_valid = InvitationKey.objects.is_key_valid
# TODO: move the authorization control to a dedicated decorator
# TODO: move the authorization control to a dedicated decorator
def invited(request, invitation_key=None):
    """Landing page for an invitation link.

    With INVITE_MODE enabled, renders either the acceptance page or an
    error page depending on whether ``invitation_key`` is valid;
    otherwise redirects to plain open registration.
    """
    # Bug fix: getattr() replaces settings.get_all_members(), which is
    # not part of the settings API in the Django versions this app
    # otherwise targets and raised AttributeError at runtime. Semantics
    # are preserved: "defined and truthy".
    if getattr(settings, 'INVITE_MODE', False):
        if invitation_key and is_key_valid(invitation_key):
            template = 'invitation/invited.html'
        else:
            template = 'invitation/wrong_invitation_key.html'
        # Bug fix: direct_to_template was never imported (it was removed
        # from Django with function-based generic views); render() is
        # the drop-in replacement.
        return render(request, template, {'invitation_key': invitation_key})
    else:
        return HttpResponseRedirect(reverse('registration_register'))
def register(request, success_url=None,
             form_class=RegistrationForm, profile_callback=None,
             template_name='registration/registration_form.html',
             extra_context=None):
    """Wrap registration.views.register, gating account creation on a
    valid invitation key whenever INVITE_MODE is enabled."""
    # See invited(): getattr() replaces the non-existent
    # settings.get_all_members() call.
    if getattr(settings, 'INVITE_MODE', False):
        invitation_key = request.REQUEST.get('invitation_key')
        if invitation_key and is_key_valid(invitation_key):
            if extra_context is None:
                extra_context = {'invitation_key': invitation_key}
            else:
                # Bug fix: this branch previously referenced the bare
                # name `invitation_key` before it was ever assigned,
                # raising NameError whenever a caller passed
                # extra_context.
                extra_context.update({'invitation_key': invitation_key})
            return registration_register(request, success_url, form_class,
                                         profile_callback, template_name,
                                         extra_context)
        else:
            # Bug fix: direct_to_template was never imported; render()
            # is the drop-in replacement.
            return render(request, 'invitation/wrong_invitation_key.html')
    else:
        return registration_register(request, success_url, form_class,
                                     profile_callback, template_name,
                                     extra_context)
def invite(request, success_url=None,
           form_class=InvitationKeyForm,
           template_name='invitation/invitation_form.html',):
    """Let a logged-in user send an invitation email.

    On a valid POST, creates an InvitationKey, emails it, and redirects;
    otherwise (GET, or invalid form) re-renders the invitation form.
    """
    if request.method == 'POST':
        form = form_class(data=request.POST, files=request.FILES)
        if form.is_valid():
            invitation = InvitationKey.objects.create_invitation(request.user)
            invitation.send_to(form.cleaned_data["email"])
            # success_url needs to be dynamically generated here; setting
            # a default value using reverse() will cause circular-import
            # problems with the default URLConf for this application, which
            # imports this file.
            return HttpResponseRedirect(success_url or reverse('invitation_complete'))
    else:
        form = form_class()
    # Bug fix: direct_to_template was never imported (removed from
    # Django with function-based generic views); render() is the
    # drop-in replacement.
    return render(request, template_name, {
        'form': form,
        'remaining_invitations': InvitationKey.objects.remaining_invitations_for_user(request.user),
    })
invite = login_required(invite)
| agpl-3.0 |
wakatime/sublime-wakatime | packages/wakatime/packages/py27/cryptography/hazmat/primitives/twofactor/totp.py | 39 | 1594 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
from cryptography.exceptions import (
UnsupportedAlgorithm, _Reasons
)
from cryptography.hazmat.backends.interfaces import HMACBackend
from cryptography.hazmat.primitives import constant_time
from cryptography.hazmat.primitives.twofactor import InvalidToken
from cryptography.hazmat.primitives.twofactor.hotp import HOTP
from cryptography.hazmat.primitives.twofactor.utils import _generate_uri
class TOTP(object):
    """Time-based one-time passwords (RFC 6238): HOTP with the moving
    factor derived from wall-clock time."""

    def __init__(self, key, length, algorithm, time_step, backend,
                 enforce_key_length=True):
        if not isinstance(backend, HMACBackend):
            raise UnsupportedAlgorithm(
                "Backend object does not implement HMACBackend.",
                _Reasons.BACKEND_MISSING_INTERFACE
            )
        self._time_step = time_step
        self._hotp = HOTP(key, length, algorithm, backend, enforce_key_length)

    def generate(self, time):
        # The HOTP counter is the number of whole time steps elapsed.
        counter = int(time / self._time_step)
        return self._hotp.generate(counter)

    def verify(self, totp, time):
        expected = self.generate(time)
        # Constant-time comparison guards against timing side channels.
        if not constant_time.bytes_eq(expected, totp):
            raise InvalidToken("Supplied TOTP value does not match.")

    def get_provisioning_uri(self, account_name, issuer):
        # otpauth:// URI for provisioning authenticator apps.
        return _generate_uri(self._hotp, "totp", account_name, issuer,
                             [("period", int(self._time_step))])
| bsd-3-clause |
ualikhansars/Gwent | lib/python2.7/site-packages/django/template/loader.py | 66 | 6521 | import warnings
from django.utils import six
from django.utils.deprecation import (
DeprecationInstanceCheck, RemovedInDjango20Warning,
RemovedInDjango110Warning,
)
from . import engines
from .backends.django import DjangoTemplates
from .base import Origin
from .engine import (
_context_instance_undefined, _dictionary_undefined, _dirs_undefined,
)
from .exceptions import TemplateDoesNotExist
from .loaders import base
def get_template(template_name, dirs=_dirs_undefined, using=None):
    """
    Loads and returns a template for the given name.

    Raises TemplateDoesNotExist if no such template exists.
    """
    errors = []
    for engine in _engine_list(using):
        try:
            # Deprecation shim for the ``dirs`` argument: only the
            # Django backend still accepts it. Collapse this to a plain
            # engine.get_template(template_name) in Django 1.10.
            if isinstance(engine, DjangoTemplates):
                return engine.get_template(template_name, dirs)
            if dirs is not _dirs_undefined:
                warnings.warn(
                    "Skipping template backend %s because its get_template "
                    "method doesn't support the dirs argument." % engine.name,
                    stacklevel=2)
            else:
                return engine.get_template(template_name)
        except TemplateDoesNotExist as e:
            errors.append(e)
    raise TemplateDoesNotExist(template_name, chain=errors)
def select_template(template_name_list, dirs=_dirs_undefined, using=None):
    """
    Loads and returns a template for one of the given names, trying the
    names in order.

    Raises TemplateDoesNotExist if no such template exists.
    """
    errors = []
    engine_list = _engine_list(using)
    for name in template_name_list:
        for engine in engine_list:
            try:
                # Deprecation shim for ``dirs``; see get_template().
                # Collapse to engine.get_template(name) in Django 1.10.
                if isinstance(engine, DjangoTemplates):
                    return engine.get_template(name, dirs)
                if dirs is not _dirs_undefined:
                    warnings.warn(
                        "Skipping template backend %s because its get_template "
                        "method doesn't support the dirs argument." % engine.name,
                        stacklevel=2)
                else:
                    return engine.get_template(name)
            except TemplateDoesNotExist as e:
                errors.append(e)
    if template_name_list:
        raise TemplateDoesNotExist(', '.join(template_name_list), chain=errors)
    raise TemplateDoesNotExist("No template names provided")
def render_to_string(template_name, context=None,
                     context_instance=_context_instance_undefined,
                     dirs=_dirs_undefined,
                     dictionary=_dictionary_undefined,
                     request=None, using=None):
    """
    Loads a template and renders it with a context. Returns a string.

    template_name may be a string or a list of strings.

    The context_instance, dirs and dictionary arguments are deprecated;
    passing any of them routes the call through the legacy Django-engine
    path below.
    """
    if (context_instance is _context_instance_undefined
            and dirs is _dirs_undefined
            and dictionary is _dictionary_undefined):
        # No deprecated arguments were passed - use the new code path
        if isinstance(template_name, (list, tuple)):
            template = select_template(template_name, using=using)
        else:
            template = get_template(template_name, using=using)
        return template.render(context, request)
    else:
        chain = []
        # Some deprecated arguments were passed - use the legacy code path
        for engine in _engine_list(using):
            try:
                # This is required for deprecating properly arguments specific
                # to Django templates. Remove Engine.render_to_string() at the
                # same time as this code path in Django 1.10.
                if isinstance(engine, DjangoTemplates):
                    if request is not None:
                        raise ValueError(
                            "render_to_string doesn't support the request argument "
                            "when some deprecated arguments are passed.")
                    # Hack -- use the internal Engine instance of DjangoTemplates.
                    return engine.engine.render_to_string(
                        template_name, context, context_instance, dirs, dictionary)
                elif context_instance is not _context_instance_undefined:
                    warnings.warn(
                        "Skipping template backend %s because its render_to_string "
                        "method doesn't support the context_instance argument." %
                        engine.name, stacklevel=2)
                elif dirs is not _dirs_undefined:
                    warnings.warn(
                        "Skipping template backend %s because its render_to_string "
                        "method doesn't support the dirs argument." % engine.name,
                        stacklevel=2)
                elif dictionary is not _dictionary_undefined:
                    warnings.warn(
                        "Skipping template backend %s because its render_to_string "
                        "method doesn't support the dictionary argument." %
                        engine.name, stacklevel=2)
            except TemplateDoesNotExist as e:
                chain.append(e)
                continue
        # Every engine was skipped or failed: raise with the collected chain.
        if template_name:
            if isinstance(template_name, (list, tuple)):
                template_name = ', '.join(template_name)
            raise TemplateDoesNotExist(template_name, chain=chain)
        else:
            raise TemplateDoesNotExist("No template names provided")
def _engine_list(using=None):
    """Return the template engines to consult: every configured engine,
    or just the one registered under ``using``."""
    if using is None:
        return engines.all()
    return [engines[using]]
class BaseLoader(base.Loader):
    # Deprecated alias of django.template.loaders.base.Loader, kept only so
    # existing imports keep working; instantiation emits a warning.
    # NOTE(review): presumably this flag tells the engine machinery that this
    # legacy loader's __init__ doesn't accept an engine argument — confirm
    # against django.template.loaders.base.Loader.
    _accepts_engine_in_init = False

    def __init__(self, *args, **kwargs):
        # Warn on every instantiation, then delegate unchanged to the
        # superseding base class.
        warnings.warn(
            "django.template.loader.BaseLoader was superseded by "
            "django.template.loaders.base.Loader.",
            RemovedInDjango110Warning, stacklevel=2)
        super(BaseLoader, self).__init__(*args, **kwargs)
class LoaderOrigin(six.with_metaclass(DeprecationInstanceCheck, Origin)):
    # Deprecated alias of django.template.Origin. The DeprecationInstanceCheck
    # metaclass reads `alternative` and `deprecation_warning` to emit the
    # warning when this class is used in instance checks.
    alternative = 'django.template.Origin'
    deprecation_warning = RemovedInDjango20Warning
| mit |
ZuluPro/moto | tests/test_sns/test_subscriptions_boto3.py | 3 | 9890 | from __future__ import unicode_literals
import boto3
import json
import sure # noqa
from botocore.exceptions import ClientError
from nose.tools import assert_raises
from moto import mock_sns
from moto.sns.models import DEFAULT_PAGE_SIZE
@mock_sns
def test_subscribe_sms():
    """An SMS subscription to a topic succeeds and returns a SubscriptionArn."""
    client = boto3.client('sns', region_name='us-east-1')
    # The original called create_topic twice and discarded the first result;
    # CreateTopic is idempotent, so one call suffices.
    resp = client.create_topic(Name="some-topic")
    arn = resp['TopicArn']

    resp = client.subscribe(
        TopicArn=arn,
        Protocol='sms',
        Endpoint='+15551234567'
    )
    resp.should.contain('SubscriptionArn')
@mock_sns
def test_subscribe_bad_sms():
    """Subscribing a malformed phone number must raise InvalidParameter."""
    client = boto3.client('sns', region_name='us-east-1')
    arn = client.create_topic(Name="some-topic")['TopicArn']

    # The original try/except passed silently when subscribe() succeeded;
    # assert_raises makes a missing error fail the test.
    with assert_raises(ClientError) as cm:
        client.subscribe(
            TopicArn=arn,
            Protocol='sms',
            Endpoint='NAA+15551234567'
        )
    cm.exception.response['Error']['Code'].should.equal('InvalidParameter')
@mock_sns
def test_creating_subscription():
    """An HTTP subscription can be created, inspected, and removed again."""
    client = boto3.client('sns', region_name='us-east-1')
    client.create_topic(Name="some-topic")
    topic_arn = client.list_topics()["Topics"][0]['TopicArn']

    client.subscribe(TopicArn=topic_arn,
                     Protocol="http",
                     Endpoint="http://example.com/")

    subs = client.list_subscriptions()["Subscriptions"]
    subs.should.have.length_of(1)
    sub = subs[0]
    sub["TopicArn"].should.equal(topic_arn)
    sub["Protocol"].should.equal("http")
    sub["SubscriptionArn"].should.contain(topic_arn)
    sub["Endpoint"].should.equal("http://example.com/")

    # Unsubscribing must leave no subscriptions behind.
    client.unsubscribe(SubscriptionArn=sub["SubscriptionArn"])
    client.list_subscriptions()["Subscriptions"].should.have.length_of(0)
@mock_sns
def test_deleting_subscriptions_by_deleting_topic():
    """Deleting a topic must also drop the subscriptions attached to it."""
    conn = boto3.client('sns', region_name='us-east-1')
    conn.create_topic(Name="some-topic")
    response = conn.list_topics()
    topic_arn = response["Topics"][0]['TopicArn']

    conn.subscribe(TopicArn=topic_arn,
                   Protocol="http",
                   Endpoint="http://example.com/")

    # Sanity-check the subscription exists before deleting the topic.
    subscriptions = conn.list_subscriptions()["Subscriptions"]
    subscriptions.should.have.length_of(1)
    subscription = subscriptions[0]
    subscription["TopicArn"].should.equal(topic_arn)
    subscription["Protocol"].should.equal("http")
    subscription["SubscriptionArn"].should.contain(topic_arn)
    subscription["Endpoint"].should.equal("http://example.com/")

    # Now delete the topic
    conn.delete_topic(TopicArn=topic_arn)

    # And there should now be 0 topics
    topics_json = conn.list_topics()
    topics = topics_json["Topics"]
    topics.should.have.length_of(0)

    # And there should be zero subscriptions left
    subscriptions = conn.list_subscriptions()["Subscriptions"]
    subscriptions.should.have.length_of(0)
@mock_sns
def test_getting_subscriptions_by_topic():
    """list_subscriptions_by_topic returns only that topic's subscriptions."""
    conn = boto3.client('sns', region_name='us-east-1')
    conn.create_topic(Name="topic1")
    conn.create_topic(Name="topic2")

    # NOTE(review): assumes list_topics returns topics in creation order so
    # topics[0] is "topic1" — confirm this holds for moto's mock backend.
    response = conn.list_topics()
    topics = response["Topics"]
    topic1_arn = topics[0]['TopicArn']
    topic2_arn = topics[1]['TopicArn']

    # One subscription per topic, with distinguishable endpoints.
    conn.subscribe(TopicArn=topic1_arn,
                   Protocol="http",
                   Endpoint="http://example1.com/")
    conn.subscribe(TopicArn=topic2_arn,
                   Protocol="http",
                   Endpoint="http://example2.com/")

    topic1_subscriptions = conn.list_subscriptions_by_topic(TopicArn=topic1_arn)[
        "Subscriptions"]
    topic1_subscriptions.should.have.length_of(1)
    topic1_subscriptions[0]['Endpoint'].should.equal("http://example1.com/")
@mock_sns
def test_subscription_paging():
    """Subscription listings page at DEFAULT_PAGE_SIZE with a numeric NextToken."""
    conn = boto3.client('sns', region_name='us-east-1')
    conn.create_topic(Name="topic1")

    response = conn.list_topics()
    topics = response["Topics"]
    topic1_arn = topics[0]['TopicArn']

    # Create one full page plus a third of a page of subscriptions.
    for index in range(DEFAULT_PAGE_SIZE + int(DEFAULT_PAGE_SIZE / 3)):
        conn.subscribe(TopicArn=topic1_arn,
                       Protocol='email',
                       Endpoint='email_' + str(index) + '@test.com')

    # First page: exactly DEFAULT_PAGE_SIZE items and an offset NextToken.
    all_subscriptions = conn.list_subscriptions()
    all_subscriptions["Subscriptions"].should.have.length_of(DEFAULT_PAGE_SIZE)
    next_token = all_subscriptions["NextToken"]
    next_token.should.equal(str(DEFAULT_PAGE_SIZE))

    # Second page: the remaining third, and no further NextToken.
    all_subscriptions = conn.list_subscriptions(NextToken=next_token)
    all_subscriptions["Subscriptions"].should.have.length_of(
        int(DEFAULT_PAGE_SIZE / 3))
    all_subscriptions.shouldnt.have("NextToken")

    # The same paging behaviour applies to the per-topic listing.
    topic1_subscriptions = conn.list_subscriptions_by_topic(
        TopicArn=topic1_arn)
    topic1_subscriptions["Subscriptions"].should.have.length_of(
        DEFAULT_PAGE_SIZE)
    next_token = topic1_subscriptions["NextToken"]
    next_token.should.equal(str(DEFAULT_PAGE_SIZE))

    topic1_subscriptions = conn.list_subscriptions_by_topic(
        TopicArn=topic1_arn, NextToken=next_token)
    topic1_subscriptions["Subscriptions"].should.have.length_of(
        int(DEFAULT_PAGE_SIZE / 3))
    topic1_subscriptions.shouldnt.have("NextToken")
@mock_sns
def test_set_subscription_attributes():
    """Subscription attributes round-trip through set/get; invalid ARNs and
    attribute names raise ClientError."""
    conn = boto3.client('sns', region_name='us-east-1')
    conn.create_topic(Name="some-topic")
    response = conn.list_topics()
    topic_arn = response["Topics"][0]['TopicArn']

    conn.subscribe(TopicArn=topic_arn,
                   Protocol="http",
                   Endpoint="http://example.com/")

    subscriptions = conn.list_subscriptions()["Subscriptions"]
    subscriptions.should.have.length_of(1)
    subscription = subscriptions[0]
    subscription["TopicArn"].should.equal(topic_arn)
    subscription["Protocol"].should.equal("http")
    subscription["SubscriptionArn"].should.contain(topic_arn)
    subscription["Endpoint"].should.equal("http://example.com/")

    subscription_arn = subscription["SubscriptionArn"]
    attrs = conn.get_subscription_attributes(
        SubscriptionArn=subscription_arn
    )
    attrs.should.have.key('Attributes')

    conn.set_subscription_attributes(
        SubscriptionArn=subscription_arn,
        AttributeName='RawMessageDelivery',
        AttributeValue='true'
    )
    delivery_policy = json.dumps({
        'healthyRetryPolicy': {
            "numRetries": 10,
            "minDelayTarget": 1,
            "maxDelayTarget":2
        }
    })
    conn.set_subscription_attributes(
        SubscriptionArn=subscription_arn,
        AttributeName='DeliveryPolicy',
        AttributeValue=delivery_policy
    )

    # Both attributes must be echoed back exactly as they were set.
    attrs = conn.get_subscription_attributes(
        SubscriptionArn=subscription_arn
    )
    attrs['Attributes']['RawMessageDelivery'].should.equal('true')
    attrs['Attributes']['DeliveryPolicy'].should.equal(delivery_policy)

    # not existing subscription
    with assert_raises(ClientError):
        conn.set_subscription_attributes(
            SubscriptionArn='invalid',
            AttributeName='RawMessageDelivery',
            AttributeValue='true'
        )
    with assert_raises(ClientError):
        attrs = conn.get_subscription_attributes(
            SubscriptionArn='invalid'
        )

    # invalid attr name
    with assert_raises(ClientError):
        conn.set_subscription_attributes(
            SubscriptionArn=subscription_arn,
            AttributeName='InvalidName',
            AttributeValue='true'
        )
@mock_sns
def test_check_not_opted_out():
    """An ordinary number is reported as not opted out."""
    client = boto3.client('sns', region_name='us-east-1')
    result = client.check_if_phone_number_is_opted_out(phoneNumber='+447428545375')
    result.should.contain('isOptedOut')
    result['isOptedOut'].should.be(False)
@mock_sns
def test_check_opted_out():
    """A number ending in 99 is hardcoded by the mock endpoint as opted out."""
    client = boto3.client('sns', region_name='us-east-1')
    result = client.check_if_phone_number_is_opted_out(phoneNumber='+447428545399')
    result.should.contain('isOptedOut')
    result['isOptedOut'].should.be(True)
@mock_sns
def test_check_opted_out_invalid():
    """A malformed phone number is rejected with a ClientError."""
    client = boto3.client('sns', region_name='us-east-1')
    with assert_raises(ClientError):
        client.check_if_phone_number_is_opted_out(phoneNumber='+44742LALALA')
@mock_sns
def test_list_opted_out():
    """The opted-out listing contains at least one pre-seeded number."""
    client = boto3.client('sns', region_name='us-east-1')
    listing = client.list_phone_numbers_opted_out()
    listing.should.contain('phoneNumbers')
    len(listing['phoneNumbers']).should.be.greater_than(0)
@mock_sns
def test_opt_in():
    """Opting a number back in removes it from the opted-out listing."""
    conn = boto3.client('sns', region_name='us-east-1')
    response = conn.list_phone_numbers_opted_out()
    current_len = len(response['phoneNumbers'])
    assert current_len > 0

    conn.opt_in_phone_number(phoneNumber=response['phoneNumbers'][0])

    # NOTE(review): assumes the mock pre-seeds at least two opted-out numbers;
    # with exactly one, greater_than(0) would fail after the opt-in — confirm.
    response = conn.list_phone_numbers_opted_out()
    len(response['phoneNumbers']).should.be.greater_than(0)
    len(response['phoneNumbers']).should.be.lower_than(current_len)
@mock_sns
def test_confirm_subscription():
    """confirm_subscription accepts a pending-confirmation token without error."""
    conn = boto3.client('sns', region_name='us-east-1')
    response = conn.create_topic(Name='testconfirm')

    conn.confirm_subscription(
        TopicArn=response['TopicArn'],
        Token='2336412f37fb687f5d51e6e241d59b68c4e583a5cee0be6f95bbf97ab8d2441cf47b99e848408adaadf4c197e65f03473d53c4ba398f6abbf38ce2e8ebf7b4ceceb2cd817959bcde1357e58a2861b05288c535822eb88cac3db04f592285249971efc6484194fc4a4586147f16916692',
        AuthenticateOnUnsubscribe='true'
    )
| apache-2.0 |
lukas-krecan/tensorflow | tensorflow/python/kernel_tests/dense_update_ops_no_tsan_test.py | 9 | 2697 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for state updating ops that may have benign race conditions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
class AssignOpTest(tf.test.TestCase):
  """Checks that unlocked parallel assign/assign_add stay within bounds."""

  # NOTE(mrry): We exclude these tests from the TSAN TAP target, because they
  # contain benign and deliberate data races when multiple threads update
  # the same parameters without a lock.
  def testParallelUpdateWithoutLocking(self):
    """20 threads each add a ones matrix without locking; every element
    must end up between 1x and 20x even though individual adds may race."""
    with self.test_session() as sess:
      ones_t = tf.fill([1024, 1024], 1.0)
      p = tf.Variable(tf.zeros([1024, 1024]))
      adds = [tf.assign_add(p, ones_t, use_locking=False)
              for _ in range(20)]
      tf.initialize_all_variables().run()

      def run_add(add_op):
        sess.run(add_op)
      threads = [self.checkedThread(target=run_add, args=(add_op,))
                 for add_op in adds]
      for t in threads:
        t.start()
      for t in threads:
        t.join()

      vals = p.eval()
      # Racy adds may lose updates, but each element saw at least one add
      # and at most all twenty.
      ones = np.ones((1024, 1024)).astype(np.float32)
      self.assertTrue((vals >= ones).all())
      self.assertTrue((vals <= ones * 20).all())

  def testParallelAssignWithoutLocking(self):
    """20 threads assign i*ones concurrently; each element must equal one
    of the assigned values (somewhere in [1, 20])."""
    with self.test_session() as sess:
      ones_t = tf.fill([1024, 1024], float(1))
      p = tf.Variable(tf.zeros([1024, 1024]))
      assigns = [tf.assign(p, tf.mul(ones_t, float(i)), False)
                 for i in range(1, 21)]
      tf.initialize_all_variables().run()

      def run_assign(assign_op):
        sess.run(assign_op)
      threads = [self.checkedThread(target=run_assign, args=(assign_op,))
                 for assign_op in assigns]
      for t in threads:
        t.start()
      for t in threads:
        t.join()

      vals = p.eval()
      # Assert every element is taken from one of the assignments.
      self.assertTrue((vals > 0).all())
      self.assertTrue((vals <= 20).all())
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  tf.test.main()
| apache-2.0 |
AutorestCI/azure-sdk-for-python | azure-cognitiveservices-vision-contentmoderator/azure/cognitiveservices/vision/contentmoderator/models/screen.py | 2 | 2961 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Screen(Model):
    """The response for a Screen text request.

    (AutoRest-generated model; regenerate rather than hand-editing.)

    :param original_text: The original text.
    :type original_text: str
    :param normalized_text: The normalized text.
    :type normalized_text: str
    :param auto_corrected_text: The autocorrected text.
    :type auto_corrected_text: str
    :param misrepresentation: The misrepresentation text.
    :type misrepresentation: list[str]
    :param classification: The classification details of the text.
    :type classification:
     ~azure.cognitiveservices.vision.contentmoderator.models.Classification
    :param status: The evaluate status.
    :type status:
     ~azure.cognitiveservices.vision.contentmoderator.models.Status
    :param pii: Personal Identifier Information details.
    :type pii: ~azure.cognitiveservices.vision.contentmoderator.models.PII
    :param language: Language of the input text content.
    :type language: str
    :param terms:
    :type terms:
     list[~azure.cognitiveservices.vision.contentmoderator.models.DetectedTerms]
    :param tracking_id: Unique Content Moderator transaction Id.
    :type tracking_id: str
    """

    # Maps each Python attribute to its wire (JSON) key and msrest type,
    # used by msrest's serializer/deserializer.
    _attribute_map = {
        'original_text': {'key': 'OriginalText', 'type': 'str'},
        'normalized_text': {'key': 'NormalizedText', 'type': 'str'},
        'auto_corrected_text': {'key': 'AutoCorrectedText', 'type': 'str'},
        'misrepresentation': {'key': 'Misrepresentation', 'type': '[str]'},
        'classification': {'key': 'Classification', 'type': 'Classification'},
        'status': {'key': 'Status', 'type': 'Status'},
        'pii': {'key': 'PII', 'type': 'PII'},
        'language': {'key': 'Language', 'type': 'str'},
        'terms': {'key': 'Terms', 'type': '[DetectedTerms]'},
        'tracking_id': {'key': 'TrackingId', 'type': 'str'},
    }

    def __init__(self, original_text=None, normalized_text=None, auto_corrected_text=None, misrepresentation=None, classification=None, status=None, pii=None, language=None, terms=None, tracking_id=None):
        # All fields are optional; unset fields serialize as absent.
        super(Screen, self).__init__()
        self.original_text = original_text
        self.normalized_text = normalized_text
        self.auto_corrected_text = auto_corrected_text
        self.misrepresentation = misrepresentation
        self.classification = classification
        self.status = status
        self.pii = pii
        self.language = language
        self.terms = terms
        self.tracking_id = tracking_id
| mit |
shingonoide/odoo | addons/account_bank_statement_extensions/wizard/cancel_statement_line.py | 381 | 1484 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class cancel_statement_line(osv.osv_memory):
    """Wizard that resets the selected bank statement lines to draft."""
    _name = 'cancel.statement.line'
    _description = 'Cancel selected statement lines'

    def cancel_lines(self, cr, uid, ids, context=None):
        """Set the statement lines selected in the client (passed via
        ``context['active_ids']``) back to the ``draft`` state.

        ``context`` now defaults to None, matching the standard OpenERP ORM
        method signature; existing callers that pass it are unaffected.

        :return: empty dict, which closes the wizard window
        """
        if context is None:
            context = {}
        line_ids = context['active_ids']
        line_obj = self.pool.get('account.bank.statement.line')
        line_obj.write(cr, uid, line_ids, {'state': 'draft'}, context=context)
        return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
shsingh/ansible | test/units/modules/network/check_point/test_cp_mgmt_dns_domain.py | 19 | 3873 | # Ansible module to manage CheckPoint Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from units.modules.utils import set_module_args, exit_json, fail_json, AnsibleExitJson
from ansible.module_utils import basic
from ansible.modules.network.check_point import cp_mgmt_dns_domain
# Expected server-side representation of the dns-domain object.
OBJECT = {
    "name": ".www.example.com",
    "is_sub_domain": False
}

# Module arguments used to create the object.
CREATE_PAYLOAD = {
    "name": ".www.example.com",
    "is_sub_domain": False
}

# Module arguments used to update the object (flips is_sub_domain).
UPDATE_PAYLOAD = {
    "name": ".www.example.com",
    "is_sub_domain": True
}

# After an update the mocked API echoes back exactly the update payload.
OBJECT_AFTER_UPDATE = UPDATE_PAYLOAD

# Module arguments used to delete the object.
DELETE_PAYLOAD = {
    "name": ".www.example.com",
    "state": "absent"
}

# Dotted path of the api_call helper that every test mocks out.
function_path = 'ansible.modules.network.check_point.cp_mgmt_dns_domain.api_call'
# Key under which the module returns the managed object.
api_call_object = 'dns-domain'
class TestCheckpointDnsDomain(object):
    """CRUD + idempotency tests for cp_mgmt_dns_domain with api_call mocked."""
    module = cp_mgmt_dns_domain

    @pytest.fixture(autouse=True)
    def module_mock(self, mocker):
        # Replace AnsibleModule's exit/fail handlers so results surface as
        # AnsibleExitJson exceptions instead of terminating the process.
        return mocker.patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)

    @pytest.fixture
    def connection_mock(self, mocker):
        # Stub out the Check Point HTTP-API connection; no network traffic.
        connection_class_mock = mocker.patch('ansible.module_utils.network.checkpoint.checkpoint.Connection')
        return connection_class_mock.return_value

    def test_create(self, mocker, connection_mock):
        """Creating a new object reports changed and returns the object."""
        mock_function = mocker.patch(function_path)
        mock_function.return_value = {'changed': True, api_call_object: OBJECT}
        result = self._run_module(CREATE_PAYLOAD)

        assert result['changed']
        assert OBJECT.items() == result[api_call_object].items()

    def test_create_idempotent(self, mocker, connection_mock):
        """Re-creating an identical object reports no change."""
        mock_function = mocker.patch(function_path)
        mock_function.return_value = {'changed': False, api_call_object: OBJECT}
        result = self._run_module(CREATE_PAYLOAD)

        assert not result['changed']

    def test_update(self, mocker, connection_mock):
        """Updating an existing object reports changed and the new state."""
        mock_function = mocker.patch(function_path)
        mock_function.return_value = {'changed': True, api_call_object: OBJECT_AFTER_UPDATE}
        result = self._run_module(UPDATE_PAYLOAD)

        assert result['changed']
        assert OBJECT_AFTER_UPDATE.items() == result[api_call_object].items()

    def test_update_idempotent(self, mocker, connection_mock):
        """Re-applying the same update reports no change."""
        mock_function = mocker.patch(function_path)
        mock_function.return_value = {'changed': False, api_call_object: OBJECT_AFTER_UPDATE}
        result = self._run_module(UPDATE_PAYLOAD)

        assert not result['changed']

    def test_delete(self, mocker, connection_mock):
        """Deleting an existing object reports changed."""
        mock_function = mocker.patch(function_path)
        mock_function.return_value = {'changed': True}
        result = self._run_module(DELETE_PAYLOAD)

        assert result['changed']

    def test_delete_idempotent(self, mocker, connection_mock):
        """Deleting an already-absent object reports no change."""
        mock_function = mocker.patch(function_path)
        mock_function.return_value = {'changed': False}
        result = self._run_module(DELETE_PAYLOAD)

        assert not result['changed']

    def _run_module(self, module_args):
        # Run the module's main() and return the payload it would have
        # passed to exit_json (captured via the AnsibleExitJson exception).
        set_module_args(module_args)
        with pytest.raises(AnsibleExitJson) as ex:
            self.module.main()
        return ex.value.args[0]
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.