repo_name
stringlengths
6
100
path
stringlengths
4
294
copies
stringlengths
1
5
size
stringlengths
4
6
content
stringlengths
606
896k
license
stringclasses
15 values
aakash-cr7/zulip
zerver/management/commands/runtornado.py
32
4794
from __future__ import absolute_import from __future__ import print_function from django.conf import settings settings.RUNNING_INSIDE_TORNADO = True # We must call zerver.tornado.ioloop_logging.instrument_tornado_ioloop # before we import anything else from our project in order for our # Tornado load logging to work; otherwise we might accidentally import # zerver.lib.queue (which will instantiate the Tornado ioloop) before # this. from zerver.tornado.ioloop_logging import instrument_tornado_ioloop instrument_tornado_ioloop() from django.core.management.base import BaseCommand, CommandError, CommandParser from tornado import ioloop from tornado.log import app_log from typing import Callable from zerver.lib.debug import interactive_debug_listen from zerver.lib.queue import setup_tornado_rabbitmq from zerver.tornado.application import create_tornado_application from zerver.tornado.event_queue import add_client_gc_hook, \ missedmessage_hook, process_notification, setup_event_queue from zerver.tornado.socket import respond_send_message import logging import sys if settings.USING_RABBITMQ: from zerver.lib.queue import get_queue_client def handle_callback_exception(callback): # type: (Callable) -> None logging.exception("Exception in callback") app_log.error("Exception in callback %r", callback, exc_info=True) class Command(BaseCommand): help = "Starts a Tornado Web server wrapping Django." 
def add_arguments(self, parser): # type: (CommandParser) -> None parser.add_argument('addrport', nargs="?", type=str, help='[optional port number or ipaddr:port]\n ' '(use multiple ports to start multiple servers)') parser.add_argument('--nokeepalive', action='store_true', dest='no_keep_alive', default=False, help="Tells Tornado to NOT keep alive http connections.") parser.add_argument('--noxheaders', action='store_false', dest='xheaders', default=True, help="Tells Tornado to NOT override remote IP with X-Real-IP.") def handle(self, addrport, **options): # type: (str, **bool) -> None interactive_debug_listen() import django from tornado import httpserver try: addr, port = addrport.split(':') except ValueError: addr, port = '', addrport if not addr: addr = '127.0.0.1' if not port.isdigit(): raise CommandError("%r is not a valid port number." % (port,)) xheaders = options.get('xheaders', True) no_keep_alive = options.get('no_keep_alive', False) quit_command = 'CTRL-C' if settings.DEBUG: logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)-8s %(message)s') def inner_run(): # type: () -> None from django.conf import settings from django.utils import translation translation.activate(settings.LANGUAGE_CODE) print("Validating Django models.py...") self.check(display_num_errors=True) print("\nDjango version %s" % (django.get_version())) print("Tornado server is running at http://%s:%s/" % (addr, port)) print("Quit the server with %s." % (quit_command,)) if settings.USING_RABBITMQ: queue_client = get_queue_client() # Process notifications received via RabbitMQ queue_client.register_json_consumer('notify_tornado', process_notification) queue_client.register_json_consumer('tornado_return', respond_send_message) try: # Application is an instance of Django's standard wsgi handler. 
application = create_tornado_application() # start tornado web server in single-threaded mode http_server = httpserver.HTTPServer(application, xheaders=xheaders, no_keep_alive=no_keep_alive) http_server.listen(int(port), address=addr) setup_event_queue() add_client_gc_hook(missedmessage_hook) setup_tornado_rabbitmq() instance = ioloop.IOLoop.instance() if django.conf.settings.DEBUG: instance.set_blocking_log_threshold(5) instance.handle_callback_exception = handle_callback_exception instance.start() except KeyboardInterrupt: sys.exit(0) inner_run()
apache-2.0
bhavin04890/finaldashboard
controllers/dvr.py
8
1813
# -*- coding: utf-8 -*- module = request.controller resourcename = request.function if not settings.has_module(module): raise HTTP(404, body="Module disabled: %s" % module) # ----------------------------------------------------------------------------- def index(): """ Module's Home Page """ module_name = deployment_settings.modules[module].name_nice response.title = module_name item = None if settings.has_module("cms"): table = s3db.cms_post _item = db(table.module == module).select(table.id, table.body, limitby=(0, 1)).first() if _item: if s3_has_role(ADMIN): item = DIV(XML(_item.body), BR(), A(T("Edit"), _href=URL(c="cms", f="post", args=[_item.id, "update"], vars={"module":module}), _class="action-btn")) else: item = _item.body elif s3_has_role(ADMIN): item = DIV(H2(module_name), A(T("Edit"), _href=URL(c="cms", f="post", args="create", vars={"module":module}), _class="action-btn")) if not item: item = H2(module_name) # tbc report = "" response.view = "index.html" return dict(item=item, report=report) # ----------------------------------------------------------------------------- def case(): """ REST Controller """ return s3_rest_controller() # END =========================================================================
mit
Akylas/CouchPotatoServer
libs/jinja2/testsuite/lexnparse.py
8
14120
# -*- coding: utf-8 -*- """ jinja2.testsuite.lexnparse ~~~~~~~~~~~~~~~~~~~~~~~~~~ All the unittests regarding lexing, parsing and syntax. :copyright: (c) 2010 by the Jinja Team. :license: BSD, see LICENSE for more details. """ import sys import unittest from jinja2.testsuite import JinjaTestCase from jinja2 import Environment, Template, TemplateSyntaxError, \ UndefinedError, nodes env = Environment() # how does a string look like in jinja syntax? if sys.version_info < (3, 0): def jinja_string_repr(string): return repr(string)[1:] else: jinja_string_repr = repr class LexerTestCase(JinjaTestCase): def test_raw1(self): tmpl = env.from_string('{% raw %}foo{% endraw %}|' '{%raw%}{{ bar }}|{% baz %}{% endraw %}') assert tmpl.render() == 'foo|{{ bar }}|{% baz %}' def test_raw2(self): tmpl = env.from_string('1 {%- raw -%} 2 {%- endraw -%} 3') assert tmpl.render() == '123' def test_balancing(self): env = Environment('{%', '%}', '${', '}') tmpl = env.from_string('''{% for item in seq %}${{'foo': item}|upper}{% endfor %}''') assert tmpl.render(seq=range(3)) == "{'FOO': 0}{'FOO': 1}{'FOO': 2}" def test_comments(self): env = Environment('<!--', '-->', '{', '}') tmpl = env.from_string('''\ <ul> <!--- for item in seq --> <li>{item}</li> <!--- endfor --> </ul>''') assert tmpl.render(seq=range(3)) == ("<ul>\n <li>0</li>\n " "<li>1</li>\n <li>2</li>\n</ul>") def test_string_escapes(self): for char in u'\0', u'\u2668', u'\xe4', u'\t', u'\r', u'\n': tmpl = env.from_string('{{ %s }}' % jinja_string_repr(char)) assert tmpl.render() == char assert env.from_string('{{ "\N{HOT SPRINGS}" }}').render() == u'\u2668' def test_bytefallback(self): from pprint import pformat tmpl = env.from_string(u'''{{ 'foo'|pprint }}|{{ 'bär'|pprint }}''') assert tmpl.render() == pformat('foo') + '|' + pformat(u'bär') def test_operators(self): from jinja2.lexer import operators for test, expect in operators.iteritems(): if test in '([{}])': continue stream = env.lexer.tokenize('{{ %s }}' % test) stream.next() 
assert stream.current.type == expect def test_normalizing(self): for seq in '\r', '\r\n', '\n': env = Environment(newline_sequence=seq) tmpl = env.from_string('1\n2\r\n3\n4\n') result = tmpl.render() assert result.replace(seq, 'X') == '1X2X3X4' class ParserTestCase(JinjaTestCase): def test_php_syntax(self): env = Environment('<?', '?>', '<?=', '?>', '<!--', '-->') tmpl = env.from_string('''\ <!-- I'm a comment, I'm not interesting -->\ <? for item in seq -?> <?= item ?> <?- endfor ?>''') assert tmpl.render(seq=range(5)) == '01234' def test_erb_syntax(self): env = Environment('<%', '%>', '<%=', '%>', '<%#', '%>') tmpl = env.from_string('''\ <%# I'm a comment, I'm not interesting %>\ <% for item in seq -%> <%= item %> <%- endfor %>''') assert tmpl.render(seq=range(5)) == '01234' def test_comment_syntax(self): env = Environment('<!--', '-->', '${', '}', '<!--#', '-->') tmpl = env.from_string('''\ <!--# I'm a comment, I'm not interesting -->\ <!-- for item in seq ---> ${item} <!--- endfor -->''') assert tmpl.render(seq=range(5)) == '01234' def test_balancing(self): tmpl = env.from_string('''{{{'foo':'bar'}.foo}}''') assert tmpl.render() == 'bar' def test_start_comment(self): tmpl = env.from_string('''{# foo comment and bar comment #} {% macro blub() %}foo{% endmacro %} {{ blub() }}''') assert tmpl.render().strip() == 'foo' def test_line_syntax(self): env = Environment('<%', '%>', '${', '}', '<%#', '%>', '%') tmpl = env.from_string('''\ <%# regular comment %> % for item in seq: ${item} % endfor''') assert [int(x.strip()) for x in tmpl.render(seq=range(5)).split()] == \ range(5) env = Environment('<%', '%>', '${', '}', '<%#', '%>', '%', '##') tmpl = env.from_string('''\ <%# regular comment %> % for item in seq: ${item} ## the rest of the stuff % endfor''') assert [int(x.strip()) for x in tmpl.render(seq=range(5)).split()] == \ range(5) def test_line_syntax_priority(self): # XXX: why is the whitespace there in front of the newline? 
env = Environment('{%', '%}', '${', '}', '/*', '*/', '##', '#') tmpl = env.from_string('''\ /* ignore me. I'm a multiline comment */ ## for item in seq: * ${item} # this is just extra stuff ## endfor''') assert tmpl.render(seq=[1, 2]).strip() == '* 1\n* 2' env = Environment('{%', '%}', '${', '}', '/*', '*/', '#', '##') tmpl = env.from_string('''\ /* ignore me. I'm a multiline comment */ # for item in seq: * ${item} ## this is just extra stuff ## extra stuff i just want to ignore # endfor''') assert tmpl.render(seq=[1, 2]).strip() == '* 1\n\n* 2' def test_error_messages(self): def assert_error(code, expected): try: Template(code) except TemplateSyntaxError, e: assert str(e) == expected, 'unexpected error message' else: assert False, 'that was supposed to be an error' assert_error('{% for item in seq %}...{% endif %}', "Encountered unknown tag 'endif'. Jinja was looking " "for the following tags: 'endfor' or 'else'. The " "innermost block that needs to be closed is 'for'.") assert_error('{% if foo %}{% for item in seq %}...{% endfor %}{% endfor %}', "Encountered unknown tag 'endfor'. Jinja was looking for " "the following tags: 'elif' or 'else' or 'endif'. The " "innermost block that needs to be closed is 'if'.") assert_error('{% if foo %}', "Unexpected end of template. Jinja was looking for the " "following tags: 'elif' or 'else' or 'endif'. The " "innermost block that needs to be closed is 'if'.") assert_error('{% for item in seq %}', "Unexpected end of template. Jinja was looking for the " "following tags: 'endfor' or 'else'. 
The innermost block " "that needs to be closed is 'for'.") assert_error('{% block foo-bar-baz %}', "Block names in Jinja have to be valid Python identifiers " "and may not contain hyphens, use an underscore instead.") assert_error('{% unknown_tag %}', "Encountered unknown tag 'unknown_tag'.") class SyntaxTestCase(JinjaTestCase): def test_call(self): env = Environment() env.globals['foo'] = lambda a, b, c, e, g: a + b + c + e + g tmpl = env.from_string("{{ foo('a', c='d', e='f', *['b'], **{'g': 'h'}) }}") assert tmpl.render() == 'abdfh' def test_slicing(self): tmpl = env.from_string('{{ [1, 2, 3][:] }}|{{ [1, 2, 3][::-1] }}') assert tmpl.render() == '[1, 2, 3]|[3, 2, 1]' def test_attr(self): tmpl = env.from_string("{{ foo.bar }}|{{ foo['bar'] }}") assert tmpl.render(foo={'bar': 42}) == '42|42' def test_subscript(self): tmpl = env.from_string("{{ foo[0] }}|{{ foo[-1] }}") assert tmpl.render(foo=[0, 1, 2]) == '0|2' def test_tuple(self): tmpl = env.from_string('{{ () }}|{{ (1,) }}|{{ (1, 2) }}') assert tmpl.render() == '()|(1,)|(1, 2)' def test_math(self): tmpl = env.from_string('{{ (1 + 1 * 2) - 3 / 2 }}|{{ 2**3 }}') assert tmpl.render() == '1.5|8' def test_div(self): tmpl = env.from_string('{{ 3 // 2 }}|{{ 3 / 2 }}|{{ 3 % 2 }}') assert tmpl.render() == '1|1.5|1' def test_unary(self): tmpl = env.from_string('{{ +3 }}|{{ -3 }}') assert tmpl.render() == '3|-3' def test_concat(self): tmpl = env.from_string("{{ [1, 2] ~ 'foo' }}") assert tmpl.render() == '[1, 2]foo' def test_compare(self): tmpl = env.from_string('{{ 1 > 0 }}|{{ 1 >= 1 }}|{{ 2 < 3 }}|' '{{ 2 == 2 }}|{{ 1 <= 1 }}') assert tmpl.render() == 'True|True|True|True|True' def test_inop(self): tmpl = env.from_string('{{ 1 in [1, 2, 3] }}|{{ 1 not in [1, 2, 3] }}') assert tmpl.render() == 'True|False' def test_literals(self): tmpl = env.from_string('{{ [] }}|{{ {} }}|{{ () }}') assert tmpl.render().lower() == '[]|{}|()' def test_bool(self): tmpl = env.from_string('{{ true and false }}|{{ false ' 'or true }}|{{ not 
false }}') assert tmpl.render() == 'False|True|True' def test_grouping(self): tmpl = env.from_string('{{ (true and false) or (false and true) and not false }}') assert tmpl.render() == 'False' def test_django_attr(self): tmpl = env.from_string('{{ [1, 2, 3].0 }}|{{ [[1]].0.0 }}') assert tmpl.render() == '1|1' def test_conditional_expression(self): tmpl = env.from_string('''{{ 0 if true else 1 }}''') assert tmpl.render() == '0' def test_short_conditional_expression(self): tmpl = env.from_string('<{{ 1 if false }}>') assert tmpl.render() == '<>' tmpl = env.from_string('<{{ (1 if false).bar }}>') self.assert_raises(UndefinedError, tmpl.render) def test_filter_priority(self): tmpl = env.from_string('{{ "foo"|upper + "bar"|upper }}') assert tmpl.render() == 'FOOBAR' def test_function_calls(self): tests = [ (True, '*foo, bar'), (True, '*foo, *bar'), (True, '*foo, bar=42'), (True, '**foo, *bar'), (True, '**foo, bar'), (False, 'foo, bar'), (False, 'foo, bar=42'), (False, 'foo, bar=23, *args'), (False, 'a, b=c, *d, **e'), (False, '*foo, **bar') ] for should_fail, sig in tests: if should_fail: self.assert_raises(TemplateSyntaxError, env.from_string, '{{ foo(%s) }}' % sig) else: env.from_string('foo(%s)' % sig) def test_tuple_expr(self): for tmpl in [ '{{ () }}', '{{ (1, 2) }}', '{{ (1, 2,) }}', '{{ 1, }}', '{{ 1, 2 }}', '{% for foo, bar in seq %}...{% endfor %}', '{% for x in foo, bar %}...{% endfor %}', '{% for x in foo, %}...{% endfor %}' ]: assert env.from_string(tmpl) def test_trailing_comma(self): tmpl = env.from_string('{{ (1, 2,) }}|{{ [1, 2,] }}|{{ {1: 2,} }}') assert tmpl.render().lower() == '(1, 2)|[1, 2]|{1: 2}' def test_block_end_name(self): env.from_string('{% block foo %}...{% endblock foo %}') self.assert_raises(TemplateSyntaxError, env.from_string, '{% block x %}{% endblock y %}') def test_constant_casing(self): for const in True, False, None: tmpl = env.from_string('{{ %s }}|{{ %s }}|{{ %s }}' % ( str(const), str(const).lower(), str(const).upper() )) assert 
tmpl.render() == '%s|%s|' % (const, const) def test_test_chaining(self): self.assert_raises(TemplateSyntaxError, env.from_string, '{{ foo is string is sequence }}') assert env.from_string('{{ 42 is string or 42 is number }}' ).render() == 'True' def test_string_concatenation(self): tmpl = env.from_string('{{ "foo" "bar" "baz" }}') assert tmpl.render() == 'foobarbaz' def test_notin(self): bar = xrange(100) tmpl = env.from_string('''{{ not 42 in bar }}''') assert tmpl.render(bar=bar) == unicode(not 42 in bar) def test_implicit_subscribed_tuple(self): class Foo(object): def __getitem__(self, x): return x t = env.from_string('{{ foo[1, 2] }}') assert t.render(foo=Foo()) == u'(1, 2)' def test_raw2(self): tmpl = env.from_string('{% raw %}{{ FOO }} and {% BAR %}{% endraw %}') assert tmpl.render() == '{{ FOO }} and {% BAR %}' def test_const(self): tmpl = env.from_string('{{ true }}|{{ false }}|{{ none }}|' '{{ none is defined }}|{{ missing is defined }}') assert tmpl.render() == 'True|False|None|True|False' def test_neg_filter_priority(self): node = env.parse('{{ -1|foo }}') assert isinstance(node.body[0].nodes[0], nodes.Filter) assert isinstance(node.body[0].nodes[0].node, nodes.Neg) def test_const_assign(self): constass1 = '''{% set true = 42 %}''' constass2 = '''{% for none in seq %}{% endfor %}''' for tmpl in constass1, constass2: self.assert_raises(TemplateSyntaxError, env.from_string, tmpl) def test_localset(self): tmpl = env.from_string('''{% set foo = 0 %}\ {% for item in [1, 2] %}{% set foo = 1 %}{% endfor %}\ {{ foo }}''') assert tmpl.render() == '0' def test_parse_unary(self): tmpl = env.from_string('{{ -foo["bar"] }}') assert tmpl.render(foo={'bar': 42}) == '-42' tmpl = env.from_string('{{ -foo["bar"]|abs }}') assert tmpl.render(foo={'bar': 42}) == '42' def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(LexerTestCase)) suite.addTest(unittest.makeSuite(ParserTestCase)) suite.addTest(unittest.makeSuite(SyntaxTestCase)) return suite
gpl-3.0
Endika/c2c-rd-addons
account_financial_report_chricar/wizard/wizard_print_journal_entries.py
5
4154
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (c) 2009 Zikzakmedia S.L. (http://zikzakmedia.com) All Rights Reserved. # Jordi Esteve <jesteve@zikzakmedia.com> # $Id$ # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import wizard #import pooler from openerp.tools.translate import _ form = '''<?xml version="1.0"?> <form string="Print Journal"> <field name="journal_ids"/> <field name="period_ids"/> <field name="sort_selection"/> <field name="landscape"/> </form>''' fields = { 'journal_ids': {'string': 'Journal', 'type': 'many2many', 'relation': 'account.journal', 'required': True}, 'period_ids': {'string': 'Period', 'type': 'many2many', 'relation': 'account.period', 'required': True}, 'sort_selection': { 'string':"Entries Sorted By", 'type':'selection', 'selection':[('date','By date'),("to_number(name,'999999999')",'By entry number'),('ref','By reference number')], 'required':True, 'default': lambda *a: 'date', }, 'landscape': {'string':"Landscape Mode",'type':'boolean'}, } class wizard_print_journal(wizard.interface): def _get_defaults(self, cr, uid, data, context): fiscalyear_obj = pooler.get_pool(cr.dbname).get('account.fiscalyear') period_obj = pooler.get_pool(cr.dbname).get('account.period') 
journal_obj = pooler.get_pool(cr.dbname).get('account.journal') data['form']['period_ids'] = period_obj.search(cr, uid, [('fiscalyear_id','=',fiscalyear_obj.find(cr, uid))]) data['form']['journal_ids'] = journal_obj.search(cr, uid, []) return data['form'] def _check_data(self, cr, uid, data, *args): period_id = data['form']['period_ids'][0][2] journal_id = data['form']['journal_ids'][0][2] if type(period_id)==type([]): ids_final = [] for journal in journal_id: for period in period_id: ids_journal_period = pooler.get_pool(cr.dbname).get('account.journal.period').search(cr,uid, [('journal_id','=',journal),('period_id','=',period)]) if ids_journal_period: ids_final.append(ids_journal_period) if not ids_final: raise wizard.except_wizard(_('No Data Available'), _('No records found for your selection!')) return data['form'] def _check(self, cr, uid, data, context): if data['form']['landscape']==True: return 'report_landscape' else: return 'report' states = { 'init': { 'actions': [_get_defaults], 'result': {'type': 'form', 'arch': form, 'fields': fields, 'state': (('end', 'Cancel'), ('print', 'Print'))}, }, 'print': { 'actions': [_check_data], 'result': {'type':'choice','next_state':_check} }, 'report': { 'actions': [], 'result': {'type':'print', 'report':'account.print.journal.entries', 'state':'end'} }, 'report_landscape': { 'actions': [], 'result': {'type':'print', 'report':'account.print.journal.entriesh', 'state':'end'} }, } wizard_print_journal('account.journal.entries.report') # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
danhooper/sandbox
pinball/attract.py
1
2386
from procgame import * class Mode(game.Mode): def __init__(self, game): super(Mode, self).__init__(game, 1) highscore_categories = [] cat = highscore.HighScoreCategory() cat.game_data_key = "HighScores" cat.titles = [ "Grand Champion", "High Score 1", "High Score 2", "High Score 3", "High Score 4" ] highscore_categories.append(cat) for category in highscore_categories: category.load_from_game(game) frame_proc = dmd.Animation().load('dmd/P-ROC.dmd').frames[0] layer_proc = dmd.FrameLayer(opaque=True, frame=frame_proc) layer_th = dmd.TextLayer(128/2, 7, game.font_jazz, "center", opaque=True).set_text("Town Hall") layer_presents = dmd.TextLayer(128/2, 7, game.font_jazz, "center", opaque=True).set_text("Presents") layer_name = dmd.TextLayer(128/2, 7, game.font_jazz, "center", opaque=True).set_text("TBD") layer_high_scores = [] for frame in highscore.generate_highscore_frames(highscore_categories): layer_high_scores.append(dmd.FrameLayer(opaque=True, frame=frame)) self.layer = dmd.ScriptedLayer(128, 32, [ { "layer": None, "seconds": 10.0 }, { "layer": layer_proc, "seconds": 3.0 }, { "layer": layer_th, "seconds": 3.0 }, { "layer": layer_presents, "seconds": 3.0 }, { "layer": layer_name, "seconds": 3.0 }, { "layer": layer_high_scores[0], "seconds": 3.0 }, { "layer": layer_high_scores[1], "seconds": 3.0 }, { "layer": layer_high_scores[2], "seconds": 3.0 }, { "layer": layer_high_scores[3], "seconds": 3.0 }, { "layer": layer_high_scores[4], "seconds": 3.0 }, ]) def mode_stopped(self): self.layer.script_index = 0 self.frame_start_time = None self.is_new_script_item = True def sw_enter_active(self, sw): self.game.modes.add(self.game.service_mode) return True def sw_exit_active(self, sw): return True def sw_startButton_active(self, sw): self.game.modes.remove(self) self.game.modes.add(self.game.mode.base) return True
mit
userzimmermann/robotframework-python3
src/robot/utils/text.py
1
3235
# Copyright 2008-2014 Nokia Solutions and Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .charwidth import get_char_width from .misc import seq2str2 from .unic import unic _MAX_ASSIGN_LENGTH = 200 _MAX_ERROR_LINES = 40 _MAX_ERROR_LINE_LENGTH = 78 _ERROR_CUT_EXPLN = ' [ Message content over the limit has been removed. ]' def cut_long_message(msg): lines = msg.splitlines() lengths = _count_line_lengths(lines) if sum(lengths) <= _MAX_ERROR_LINES: return msg start = _prune_excess_lines(lines, lengths) end = _prune_excess_lines(lines, lengths, from_end=True) return '\n'.join(start + [_ERROR_CUT_EXPLN] + end) def _prune_excess_lines(lines, lengths, from_end=False): if from_end: lines.reverse() lengths.reverse() ret = [] total = 0 # Use // (explicit int div) for Python 3 compatibility: limit = _MAX_ERROR_LINES//2 for line, length in zip(lines[:limit], lengths[:limit]): if total + length >= limit: ret.append(_cut_long_line(line, total, from_end)) break total += length ret.append(line) if from_end: ret.reverse() return ret def _cut_long_line(line, used, from_end): # Use // (explicit int div) for Python 3 compatibility: available_lines = _MAX_ERROR_LINES//2 - used available_chars = available_lines * _MAX_ERROR_LINE_LENGTH - 3 if len(line) > available_chars: if not from_end: line = line[:available_chars] + '...' else: line = '...' 
+ line[-available_chars:] return line def _count_line_lengths(lines): return [ _count_virtual_line_length(line) for line in lines ] def _count_virtual_line_length(line): if not line: return 1 lines, remainder = divmod(len(line), _MAX_ERROR_LINE_LENGTH) return lines if not remainder else lines + 1 def format_assign_message(variable, value, cut_long=True): value = unic(value) if variable.startswith('$') else seq2str2(value) if cut_long and len(value) > _MAX_ASSIGN_LENGTH: value = value[:_MAX_ASSIGN_LENGTH] + '...' return '%s = %s' % (variable, value) def get_console_length(text): return sum(get_char_width(char) for char in text) def pad_console_length(text, width): if width < 5: width = 5 diff = get_console_length(text) - width if diff > 0: text = _lose_width(text, diff+3) + '...' return _pad_width(text, width) def _pad_width(text, width): more = width - get_console_length(text) return text + ' ' * more def _lose_width(text, diff): lost = 0 while lost < diff: lost += get_console_length(text[-1]) text = text[:-1] return text
apache-2.0
trondhindenes/ansible
lib/ansible/template/template.py
156
1632
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import jinja2 __all__ = ['AnsibleJ2Template'] class AnsibleJ2Template(jinja2.environment.Template): ''' A helper class, which prevents Jinja2 from running _jinja2_vars through dict(). Without this, {% include %} and similar will create new contexts unlike the special one created in template_from_file. This ensures they are all alike, except for potential locals. ''' def new_context(self, vars=None, shared=False, locals=None): if vars is not None: if isinstance(vars, dict): vars = vars.copy() if locals is not None: vars.update(locals) else: vars = vars.add_locals(locals) return self.environment.context_class(self.environment, vars, self.name, self.blocks)
gpl-3.0
cntnboys/410Lab6
build/django/django/middleware/transaction.py
54
2703
import warnings from django.core.exceptions import MiddlewareNotUsed from django.db import connection, transaction from django.utils.deprecation import RemovedInDjango18Warning class TransactionMiddleware(object): """ Transaction middleware. If this is enabled, each view function will be run with commit_on_response activated - that way a save() doesn't do a direct commit, the commit is done when a successful response is created. If an exception happens, the database is rolled back. """ def __init__(self): warnings.warn( "TransactionMiddleware is deprecated in favor of ATOMIC_REQUESTS.", RemovedInDjango18Warning, stacklevel=2) if connection.settings_dict['ATOMIC_REQUESTS']: raise MiddlewareNotUsed def process_request(self, request): """Enters transaction management""" transaction.enter_transaction_management() def process_exception(self, request, exception): """Rolls back the database and leaves transaction management""" if transaction.is_dirty(): # This rollback might fail because of network failure for example. # If rollback isn't possible it is impossible to clean the # connection's state. So leave the connection in dirty state and # let request_finished signal deal with cleaning the connection. transaction.rollback() transaction.leave_transaction_management() def process_response(self, request, response): """Commits and leaves transaction management.""" if not transaction.get_autocommit(): if transaction.is_dirty(): # Note: it is possible that the commit fails. If the reason is # closed connection or some similar reason, then there is # little hope to proceed nicely. However, in some cases ( # deferred foreign key checks for exampl) it is still possible # to rollback(). try: transaction.commit() except Exception: # If the rollback fails, the transaction state will be # messed up. It doesn't matter, the connection will be set # to clean state after the request finishes. 
And, we can't # clean the state here properly even if we wanted to, the # connection is in transaction but we can't rollback... transaction.rollback() transaction.leave_transaction_management() raise transaction.leave_transaction_management() return response
apache-2.0
dagoaty/eve-wspace
evewspace/core/management/commands/defaultsettings.py
9
1570
# Eve W-Space # Copyright (C) 2013 Andrew Austin and other contributors # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. An additional term under section # 7 of the GPL is included in the LICENSE file. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from django.core.management.base import NoArgsCommand, CommandError from django.conf import settings from django.utils.importlib import import_module from django.utils.module_loading import module_has_submodule from core.models import ConfigEntry class Command(NoArgsCommand): """ Load default settings from each application's default_settings.py file. """ def handle_noargs(self, **options): for app in settings.INSTALLED_APPS: mod = import_module(app) if module_has_submodule(mod, "default_settings"): try: def_mod = import_module("%s.default_settings" % app) def_mod.load_defaults() except: raise
gpl-3.0
chromium/chromium
build/util/lib/common/chrome_test_server_spawner.py
6
16817
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A "Test Server Spawner" that handles killing/stopping per-test test servers.

It's used to accept requests from the device to spawn and kill instances of the
chrome test server on the host.
"""
# pylint: disable=W0702

import json
import logging
import os
import select
import struct
import subprocess
import sys
import threading
import time

from six.moves import BaseHTTPServer, urllib


SERVER_TYPES = {
    'http': '',
    'ftp': '-f',
    'ws': '--websocket',
}


_DIR_SOURCE_ROOT = os.path.abspath(
    os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir,
                 os.pardir))


_logger = logging.getLogger(__name__)


# Path that are needed to import necessary modules when launching a testserver.
os.environ['PYTHONPATH'] = os.environ.get('PYTHONPATH', '') + (':%s:%s:%s'
    % (os.path.join(_DIR_SOURCE_ROOT, 'third_party'),
       os.path.join(_DIR_SOURCE_ROOT, 'third_party', 'tlslite'),
       os.path.join(_DIR_SOURCE_ROOT, 'net', 'tools', 'testserver')))


# The timeout (in seconds) of starting up the Python test server.
_TEST_SERVER_STARTUP_TIMEOUT = 10


def _GetServerTypeCommandLine(server_type):
  """Returns the command-line by the given server type.

  Args:
    server_type: the server type to be used (e.g. 'http').

  Returns:
    A string containing the command-line argument.

  Raises:
    NotImplementedError: if |server_type| is not one of SERVER_TYPES.
  """
  if server_type not in SERVER_TYPES:
    raise NotImplementedError('Unknown server type: %s' % server_type)
  return SERVER_TYPES[server_type]


class PortForwarder:
  """Default (no-op) port-forwarding interface.

  Subclasses map host ports to device ports; this base implementation
  assumes host and device share the same port namespace.
  """

  def Map(self, port_pairs):
    pass

  def GetDevicePortForHostPort(self, host_port):
    """Returns the device port that corresponds to a given host port."""
    return host_port

  def WaitHostPortAvailable(self, port):
    """Returns True if |port| is available."""
    return True

  def WaitPortNotAvailable(self, port):
    """Returns True if |port| is not available."""
    return True

  def WaitDevicePortReady(self, port):
    """Returns whether the provided port is used."""
    return True

  def Unmap(self, device_port):
    """Unmaps specified port"""
    pass


class TestServerThread(threading.Thread):
  """A thread to run the test server in a separate process."""

  def __init__(self, ready_event, arguments, port_forwarder):
    """Initialize TestServerThread with the following argument.

    Args:
      ready_event: event which will be set when the test server is ready.
      arguments: dictionary of arguments to run the test server.
      port_forwarder: PortForwarder instance used to expose the server's
          host port to the device.
    """
    threading.Thread.__init__(self)
    self.wait_event = threading.Event()
    self.stop_event = threading.Event()
    self.ready_event = ready_event
    self.ready_event.clear()
    self.arguments = arguments
    self.port_forwarder = port_forwarder
    self.test_server_process = None
    self.is_ready = False
    self.host_port = self.arguments['port']
    self.host_ocsp_port = 0
    assert isinstance(self.host_port, int)
    # The forwarder device port now is dynamically allocated.
    self.forwarder_device_port = 0
    self.forwarder_ocsp_device_port = 0
    # Anonymous pipe in order to get port info from test server.
    self.pipe_in = None
    self.pipe_out = None
    self.process = None
    self.command_line = []

  def _WaitToStartAndGetPortFromTestServer(self):
    """Waits for the Python test server to start and gets the port it is using.

    The port information is passed by the Python test server with a pipe given
    by self.pipe_out. It is written as a result to |self.host_port|.

    Returns:
      Whether the port used by the test server was successfully fetched.
    """
    assert self.host_port == 0 and self.pipe_out and self.pipe_in
    (in_fds, _, _) = select.select([self.pipe_in, ], [], [],
                                   _TEST_SERVER_STARTUP_TIMEOUT)
    if len(in_fds) == 0:
      _logger.error('Failed to wait to the Python test server to be started.')
      return False
    # First read the data length as an unsigned 4-byte value.  This
    # is _not_ using network byte ordering since the Python test server packs
    # size as native byte order and all Chromium platforms so far are
    # configured to use little-endian.
    # TODO(jnd): Change the Python test server and local_test_server_*.cc to
    # use a unified byte order (either big-endian or little-endian).
    data_length = os.read(self.pipe_in, struct.calcsize('=L'))
    if data_length:
      (data_length,) = struct.unpack('=L', data_length)
      assert data_length
    if not data_length:
      _logger.error('Failed to get length of server data.')
      return False
    server_data_json = os.read(self.pipe_in, data_length)
    if not server_data_json:
      _logger.error('Failed to get server data.')
      return False
    _logger.info('Got port json data: %s', server_data_json)

    parsed_server_data = None
    try:
      parsed_server_data = json.loads(server_data_json)
    except ValueError:
      pass

    if not isinstance(parsed_server_data, dict):
      # Lazy %-style args instead of eager string formatting.
      _logger.error('Failed to parse server_data: %s', server_data_json)
      return False

    if not isinstance(parsed_server_data.get('port'), int):
      _logger.error('Failed to get port information from the server data.')
      return False

    self.host_port = parsed_server_data['port']
    self.host_ocsp_port = parsed_server_data.get('ocsp_port', 0)

    return self.port_forwarder.WaitPortNotAvailable(self.host_port)

  def _GenerateCommandLineArguments(self):
    """Generates the command line to run the test server.

    Note that all options are processed by following the definitions in
    testserver.py.
    """
    if self.command_line:
      return

    args_copy = dict(self.arguments)

    # Translate the server type.
    type_cmd = _GetServerTypeCommandLine(args_copy.pop('server-type'))
    if type_cmd:
      self.command_line.append(type_cmd)

    # Use a pipe to get the port given by the instance of Python test server
    # if the test does not specify the port.
    assert self.host_port == args_copy['port']
    if self.host_port == 0:
      (self.pipe_in, self.pipe_out) = os.pipe()
      self.command_line.append('--startup-pipe=%d' % self.pipe_out)

    # Pass the remaining arguments as-is.
    # NOTE: items() instead of the py2-only iteritems(); this file already
    # targets py2/py3 via six.
    for key, values in args_copy.items():
      if not isinstance(values, list):
        values = [values]
      for value in values:
        if value is None:
          self.command_line.append('--%s' % key)
        else:
          self.command_line.append('--%s=%s' % (key, value))

  def _CloseUnnecessaryFDsForTestServerProcess(self):
    # This is required to avoid subtle deadlocks that could be caused by the
    # test server child process inheriting undesirable file descriptors such as
    # file lock file descriptors.
    # NOTE: range() instead of the py2-only xrange().
    for fd in range(0, 1024):
      if fd != self.pipe_out:
        try:
          os.close(fd)
        except:
          pass

  def run(self):
    _logger.info('Start running the thread!')
    self.wait_event.clear()
    self._GenerateCommandLineArguments()
    command = [sys.executable,
               os.path.join(_DIR_SOURCE_ROOT, 'net', 'tools', 'testserver',
                            'testserver.py')] + self.command_line
    _logger.info('Running: %s', command)

    # Disable PYTHONUNBUFFERED because it has a bad interaction with the
    # testserver. Remove once this interaction is fixed.
    unbuf = os.environ.pop('PYTHONUNBUFFERED', None)

    # Pass _DIR_SOURCE_ROOT as the child's working directory so that relative
    # paths in the arguments are resolved correctly.
    self.process = subprocess.Popen(
        command, preexec_fn=self._CloseUnnecessaryFDsForTestServerProcess,
        cwd=_DIR_SOURCE_ROOT)
    if unbuf:
      os.environ['PYTHONUNBUFFERED'] = unbuf
    if self.process:
      if self.pipe_out:
        self.is_ready = self._WaitToStartAndGetPortFromTestServer()
      else:
        self.is_ready = self.port_forwarder.WaitPortNotAvailable(self.host_port)

    if self.is_ready:
      port_map = [(0, self.host_port)]
      if self.host_ocsp_port:
        port_map.extend([(0, self.host_ocsp_port)])
      self.port_forwarder.Map(port_map)

      self.forwarder_device_port = \
          self.port_forwarder.GetDevicePortForHostPort(self.host_port)
      if self.host_ocsp_port:
        self.forwarder_ocsp_device_port = \
            self.port_forwarder.GetDevicePortForHostPort(self.host_ocsp_port)

      # Check whether the forwarder is ready on the device.
      self.is_ready = self.forwarder_device_port and \
          self.port_forwarder.WaitDevicePortReady(self.forwarder_device_port)

    # Wake up the request handler thread.
    self.ready_event.set()
    # Keep thread running until Stop() gets called.
    self.stop_event.wait()

    if self.process.poll() is None:
      self.process.kill()
      # Wait for process to actually terminate.
      # (crbug.com/946475)
      self.process.wait()

    self.port_forwarder.Unmap(self.forwarder_device_port)

    self.process = None
    self.is_ready = False
    if self.pipe_out:
      os.close(self.pipe_in)
      os.close(self.pipe_out)
      self.pipe_in = None
      self.pipe_out = None
    _logger.info('Test-server has died.')
    self.wait_event.set()

  def Stop(self):
    """Blocks until the loop has finished.

    Note that this must be called in another thread.
    """
    if not self.process:
      return
    self.stop_event.set()
    self.wait_event.wait()


class SpawningServerRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  """A handler used to process http GET/POST request."""

  def _SendResponse(self, response_code, response_reason, additional_headers,
                    contents):
    """Generates a response sent to the client from the provided parameters.

    Args:
      response_code: number of the response status.
      response_reason: string of reason description of the response.
      additional_headers: dict of additional headers. Each key is the name of
                          the header, each value is the content of the header.
      contents: string of the contents we want to send to client.
    """
    self.send_response(response_code, response_reason)
    self.send_header('Content-Type', 'text/html')
    # Specify the content-length as without it the http(s) response will not
    # be completed properly (and the browser keeps expecting data).
    self.send_header('Content-Length', len(contents))
    for header_name in additional_headers:
      self.send_header(header_name, additional_headers[header_name])
    self.end_headers()
    self.wfile.write(contents)
    self.wfile.flush()

  def _StartTestServer(self):
    """Starts the test server thread."""
    _logger.info('Handling request to spawn a test server.')
    # headers.get() works on both py2 (rfc822.Message alias of getheader)
    # and py3 (email.message.Message); getheader() is py2-only.
    content_type = self.headers.get('content-type')
    if content_type != 'application/json':
      raise Exception('Bad content-type for start request.')
    content_length = self.headers.get('content-length')
    if not content_length:
      content_length = 0
    try:
      content_length = int(content_length)
    except:
      raise Exception('Bad content-length for start request.')
    _logger.info(content_length)
    test_server_argument_json = self.rfile.read(content_length)
    _logger.info(test_server_argument_json)

    if len(self.server.test_servers) >= self.server.max_instances:
      self._SendResponse(400, 'Invalid request', {},
                         'Too many test servers running')
      return

    ready_event = threading.Event()
    new_server = TestServerThread(ready_event,
                                  json.loads(test_server_argument_json),
                                  self.server.port_forwarder)
    new_server.setDaemon(True)
    new_server.start()
    ready_event.wait()
    if new_server.is_ready:
      response = {'port': new_server.forwarder_device_port,
                  'message': 'started'}
      if new_server.forwarder_ocsp_device_port:
        response['ocsp_port'] = new_server.forwarder_ocsp_device_port
      self._SendResponse(200, 'OK', {}, json.dumps(response))
      _logger.info('Test server is running on port %d forwarded to %d.',
                   new_server.forwarder_device_port, new_server.host_port)
      port = new_server.forwarder_device_port
      assert port not in self.server.test_servers
      self.server.test_servers[port] = new_server
    else:
      new_server.Stop()
      self._SendResponse(500, 'Test Server Error.', {}, '')
      _logger.info('Encounter problem during starting a test server.')

  def _KillTestServer(self, params):
    """Stops the test server instance."""
    try:
      port = int(params['port'][0])
    # KeyError/IndexError added: a missing 'port' parameter previously
    # escaped as an unhandled exception (HTTP 500 traceback) instead of
    # the intended 400 response below.
    except (ValueError, KeyError, IndexError):
      port = None
    if port is None or port <= 0:
      self._SendResponse(400, 'Invalid request.', {},
                         'port must be specified')
      return

    if port not in self.server.test_servers:
      self._SendResponse(400, 'Invalid request.', {},
                         "testserver isn't running on port %d" % port)
      return

    server = self.server.test_servers.pop(port)

    _logger.info('Handling request to kill a test server on port: %d.', port)
    server.Stop()

    # Make sure the status of test server is correct before sending response.
    if self.server.port_forwarder.WaitHostPortAvailable(port):
      self._SendResponse(200, 'OK', {}, 'killed')
      _logger.info('Test server on port %d is killed', port)
    else:
      # We expect the port to be free, but nothing stops the system from
      # binding something else to that port, so don't throw error.
      # (crbug.com/946475)
      self._SendResponse(200, 'OK', {}, '')
      # warning() with lazy args instead of the deprecated warn() + eager %.
      _logger.warning('Port %s is not free after killing test server.', port)

  def log_message(self, format, *args):
    # Suppress the default HTTP logging behavior if the logging level is higher
    # than INFO.
    if _logger.getEffectiveLevel() <= logging.INFO:
      pass

  def do_POST(self):
    parsed_path = urllib.parse.urlparse(self.path)
    action = parsed_path.path
    _logger.info('Action for POST method is: %s.', action)
    if action == '/start':
      self._StartTestServer()
    else:
      self._SendResponse(400, 'Unknown request.', {}, '')
      _logger.info('Encounter unknown request: %s.', action)

  def do_GET(self):
    parsed_path = urllib.parse.urlparse(self.path)
    action = parsed_path.path
    params = urllib.parse.parse_qs(parsed_path.query, keep_blank_values=1)
    _logger.info('Action for GET method is: %s.', action)
    for param in params:
      _logger.info('%s=%s', param, params[param][0])
    if action == '/kill':
      self._KillTestServer(params)
    elif action == '/ping':
      # The ping handler is used to check whether the spawner server is ready
      # to serve the requests. We don't need to test the status of the test
      # server when handling ping request.
      self._SendResponse(200, 'OK', {}, 'ready')
      _logger.info('Handled ping request and sent response.')
    else:
      self._SendResponse(400, 'Unknown request', {}, '')
      _logger.info('Encounter unknown request: %s.', action)


class SpawningServer(object):
  """The class used to start/stop a http server."""

  def __init__(self, test_server_spawner_port, port_forwarder, max_instances):
    self.server = BaseHTTPServer.HTTPServer(('', test_server_spawner_port),
                                            SpawningServerRequestHandler)
    self.server_port = self.server.server_port
    _logger.info('Started test server spawner on port: %d.', self.server_port)

    self.server.port_forwarder = port_forwarder
    self.server.test_servers = {}
    self.server.max_instances = max_instances

  def _Listen(self):
    _logger.info('Starting test server spawner.')
    self.server.serve_forever()

  def Start(self):
    """Starts the test server spawner."""
    listener_thread = threading.Thread(target=self._Listen)
    listener_thread.setDaemon(True)
    listener_thread.start()

  def Stop(self):
    """Stops the test server spawner.

    Also cleans the server state.
    """
    self.CleanupState()
    self.server.shutdown()

  def CleanupState(self):
    """Cleans up the spawning server state.

    This should be called if the test server spawner is reused,
    to avoid sharing the test server instance.
    """
    if self.server.test_servers:
      _logger.warning('Not all test servers were stopped.')
      for port in self.server.test_servers:
        _logger.warning('Stopping test server on port %d', port)
        self.server.test_servers[port].Stop()
      self.server.test_servers = {}
bsd-3-clause
Tomtomgo/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/net/unittestresults.py
155
2347
# Copyright (c) 2012, Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import logging
import xml.dom.minidom
# Explicitly import xml.parsers.expat: the ExpatError reference below used
# to rely on xml.dom.minidom importing expat as a side effect, which is
# fragile and not guaranteed by the documented API.
import xml.parsers.expat

_log = logging.getLogger(__name__)


class UnitTestResults(object):
    @classmethod
    def results_from_string(cls, string):
        """Parse JUnit-style XML output and return the failing test names.

        Args:
            string: the XML report text, or a falsy value.

        Returns:
            A list of "classname.testname" strings for every <testcase>
            containing a <failure> element, or None if the input is empty
            or is not well-formed XML.
        """
        if not string:
            return None
        try:
            dom = xml.dom.minidom.parseString(string)
            failures = []
            for testcase in dom.getElementsByTagName('testcase'):
                # Only testcases that carry a <failure> child count.
                if testcase.getElementsByTagName('failure').length != 0:
                    testname = testcase.getAttribute('name')
                    classname = testcase.getAttribute('classname')
                    failures.append("%s.%s" % (classname, testname))
            return failures
        # "except X as e" replaces the py2-only "except X, e" spelling; it is
        # valid on Python 2.6+ and Python 3.
        except xml.parsers.expat.ExpatError as e:
            _log.error("XML error %s parsing unit test output" % str(e))
            return None
bsd-3-clause
adamjmcgrath/glancydesign
src/django-nonrel/tests/regressiontests/comment_tests/tests/__init__.py
88
3272
from django.contrib.auth.models import User
from django.contrib.comments.forms import CommentForm
from django.contrib.comments.models import Comment
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.test import TestCase

from regressiontests.comment_tests.models import Article, Author

# Shortcut
CT = ContentType.objects.get_for_model


class CommentTestCase(TestCase):
    """Base class for comment tests that need a populated database.

    Provides helpers that create a mix of anonymous and authenticated
    comments and build valid form data for posting new ones.
    """
    fixtures = ["comment_tests"]
    urls = 'django.contrib.comments.urls'

    def createSomeComments(self):
        """Create four comments and return them as a tuple.

        The first two are anonymous ("Joe Somebody") on an Article and an
        Author; the last two are posted by an authenticated user
        ("frank_nobody") on the same Article and a different Author.
        """
        joe = {
            "user_name": "Joe Somebody",
            "user_email": "jsomebody@example.com",
            "user_url": "http://example.com/~joe/",
        }
        # Two anonymous comments on two different objects.
        anon_on_article = Comment.objects.create(
            content_type=CT(Article),
            object_pk="1",
            comment="First!",
            site=Site.objects.get_current(),
            **joe
        )
        anon_on_author = Comment.objects.create(
            content_type=CT(Author),
            object_pk="1",
            comment="First here, too!",
            site=Site.objects.get_current(),
            **joe
        )

        # Two authenticated comments: one on the same Article, and
        # one on a different Author.
        frank = User.objects.create(
            username="frank_nobody",
            first_name="Frank",
            last_name="Nobody",
            email="fnobody@example.com",
            password="",
            is_staff=False,
            is_active=True,
            is_superuser=False,
        )
        auth_on_article = Comment.objects.create(
            content_type=CT(Article),
            object_pk="1",
            user=frank,
            user_url="http://example.com/~frank/",
            comment="Damn, I wanted to be first.",
            site=Site.objects.get_current(),
        )
        auth_on_author = Comment.objects.create(
            content_type=CT(Author),
            object_pk="2",
            user=frank,
            user_url="http://example.com/~frank/",
            comment="You get here first, too?",
            site=Site.objects.get_current(),
        )
        return anon_on_article, anon_on_author, auth_on_article, auth_on_author

    def getData(self):
        """Return the user-supplied fields of a comment POST (no security
        fields)."""
        return {
            'name': 'Jim Bob',
            'email': 'jim.bob@example.com',
            'url': '',
            'comment': 'This is my comment',
        }

    def getValidData(self, obj):
        """Return complete, valid form data for commenting on ``obj``,
        including the security fields supplied by CommentForm."""
        form = CommentForm(obj)
        data = self.getData()
        data.update(form.initial)
        return data


from regressiontests.comment_tests.tests.app_api_tests import *
from regressiontests.comment_tests.tests.feed_tests import *
from regressiontests.comment_tests.tests.model_tests import *
from regressiontests.comment_tests.tests.comment_form_tests import *
from regressiontests.comment_tests.tests.templatetag_tests import *
from regressiontests.comment_tests.tests.comment_view_tests import *
from regressiontests.comment_tests.tests.moderation_view_tests import *
from regressiontests.comment_tests.tests.comment_utils_moderators_tests import *
bsd-3-clause
sohkis/leanKernel-shamu
tools/perf/util/setup.py
2079
1438
#!/usr/bin/python2

from distutils.core import setup, Extension
from os import getenv

from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib


class build_ext(_build_ext):
    """build_ext variant that redirects output into the directories chosen
    by the kernel build system (PYTHON_EXTBUILD_*)."""
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib = build_lib
        self.build_temp = build_tmp


class install_lib(_install_lib):
    """install_lib variant that installs from the external build dir."""
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib


cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()

build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
liblk = getenv('LIBLK')

# Read the extension source list, skipping blank lines and '#' comments.
# open() in a with-block replaces the py2-only file() builtin, which also
# leaked the file handle for the lifetime of the process.
with open('util/python-ext-sources') as src:
    ext_sources = [line.strip() for line in src
                   if len(line.strip()) > 0 and line[0] != '#']

perf = Extension('perf',
                 sources=ext_sources,
                 include_dirs=['util/include'],
                 extra_compile_args=cflags,
                 extra_objects=[libtraceevent, liblk],
                 )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
gpl-2.0
jbking/demo-appengine-django-golang
myproject/django/utils/autoreload.py
110
5262
# Autoreloading launcher.
# Borrowed from Peter Hunt and the CherryPy project (http://www.cherrypy.org).
# Some taken from Ian Bicking's Paste (http://pythonpaste.org/).
#
# Portions copyright (c) 2004, CherryPy Team (team@cherrypy.org)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#     * Redistributions of source code must retain the above copyright notice,
#       this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright notice,
#       this list of conditions and the following disclaimer in the documentation
#       and/or other materials provided with the distribution.
#     * Neither the name of the CherryPy Team nor the names of its contributors
#       may be used to endorse or promote products derived from this software
#       without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import os, sys, time, signal

try:
    from django.utils.six.moves import _thread as thread
except ImportError:
    from django.utils.six.moves import _dummy_thread as thread

# This import does nothing, but it's necessary to avoid some race conditions
# in the threading module. See http://code.djangoproject.com/ticket/2330 .
try:
    import threading
except ImportError:
    pass

try:
    import termios
except ImportError:
    termios = None

RUN_RELOADER = True

# Cache of source-file mtimes from the previous scan; reset on any change.
_mtimes = {}
_win = (sys.platform == "win32")


def code_changed():
    """Return True if any imported source file changed since the last call."""
    global _mtimes, _win
    filenames = [getattr(m, "__file__", None) for m in sys.modules.values()]
    for filename in filter(None, filenames):
        # Map compiled artifacts back to their source file.
        if filename.endswith(".pyc") or filename.endswith(".pyo"):
            filename = filename[:-1]
        if filename.endswith("$py.class"):
            filename = filename[:-9] + ".py"
        if not os.path.exists(filename):
            continue # File might be in an egg, so it can't be reloaded.
        stat = os.stat(filename)
        mtime = stat.st_mtime
        if _win:
            mtime -= stat.st_ctime
        if filename not in _mtimes:
            _mtimes[filename] = mtime
            continue
        if mtime != _mtimes[filename]:
            _mtimes = {}
            return True
    return False


def ensure_echo_on():
    """Re-enable terminal echo on stdin (a crashed child can leave it off)."""
    if termios:
        fd = sys.stdin
        if fd.isatty():
            attr_list = termios.tcgetattr(fd)
            if not attr_list[3] & termios.ECHO:
                attr_list[3] |= termios.ECHO
                if hasattr(signal, 'SIGTTOU'):
                    old_handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN)
                else:
                    old_handler = None
                termios.tcsetattr(fd, termios.TCSANOW, attr_list)
                if old_handler is not None:
                    signal.signal(signal.SIGTTOU, old_handler)


def reloader_thread():
    """Poll for source changes and exit with code 3 to request a restart."""
    ensure_echo_on()
    while RUN_RELOADER:
        if code_changed():
            sys.exit(3) # force reload
        time.sleep(1)


def restart_with_reloader():
    """Re-exec this program in a child process until it exits with a code
    other than 3 (the "reload requested" sentinel)."""
    while True:
        args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions] + sys.argv
        if sys.platform == "win32":
            args = ['"%s"' % arg for arg in args]
        new_environ = os.environ.copy()
        new_environ["RUN_MAIN"] = 'true'
        exit_code = os.spawnve(os.P_WAIT, sys.executable, args, new_environ)
        if exit_code != 3:
            return exit_code


def python_reloader(main_func, args, kwargs):
    if os.environ.get("RUN_MAIN") == "true":
        # Child process: run the real program on a worker thread while the
        # main thread watches for file changes.
        thread.start_new_thread(main_func, args, kwargs)
        try:
            reloader_thread()
        except KeyboardInterrupt:
            pass
    else:
        # Parent process: keep respawning the child until it exits normally.
        try:
            exit_code = restart_with_reloader()
            if exit_code < 0:
                os.kill(os.getpid(), -exit_code)
            else:
                sys.exit(exit_code)
        except KeyboardInterrupt:
            pass


def jython_reloader(main_func, args, kwargs):
    from _systemrestart import SystemRestart
    # BUG FIX: kwargs were previously dropped here (start_new_thread was
    # called with args only), silently ignoring keyword arguments that
    # python_reloader passes through correctly.
    thread.start_new_thread(main_func, args, kwargs)
    while True:
        if code_changed():
            raise SystemRestart
        time.sleep(1)


def main(main_func, args=None, kwargs=None):
    """Entry point: run main_func under the platform-appropriate reloader."""
    if args is None:
        args = ()
    if kwargs is None:
        kwargs = {}
    if sys.platform.startswith('java'):
        reloader = jython_reloader
    else:
        reloader = python_reloader
    reloader(main_func, args, kwargs)
mit
onyxfish/votersdaily_web
api/couchdb/log_views.py
1
2166
import couchdb
from couchdb.design import ViewDefinition

"""
This module defines a collection of functions which accept a CouchDB database
as an argument, are named with a 'make_views_*' convention, and return a list
of generated CouchDB ViewDefinitions.

The 'syncviews' management command dynamically executes each method to compile
a list of all Couchdb views.
"""


def make_views_all_documents(event_db):
    """
    Generate a view that includes all documents.
    """
    all_view_map_function = \
    '''
    function(doc) {
        emit(doc.access_datetime, doc)
    }
    '''

    return [ViewDefinition('api', 'all', all_view_map_function)]


def make_views_error_documents(event_db):
    """
    Generate a view that includes only documents whose result is not success.
    """
    error_view_map_function = \
    '''
    function(doc) {
        if (doc.result != "success") {
            emit(doc.access_datetime, doc)
        }
    }
    '''

    return [ViewDefinition('api', 'errors', error_view_map_function)]


def get_parser_list(event_db):
    """
    Return a list of unique parser names in the database.

    Uses a grouped map/reduce query so each parser name appears once.
    """
    parser_list_map_function = \
    '''
    function(doc) {
        emit(doc.parser_name, null);
    }
    '''

    parser_list_reduce_function = \
    '''
    function(keys, values) {
        return null;
    }
    '''

    rows = event_db.query(
        parser_list_map_function, parser_list_reduce_function, group=True)
    return [row.key for row in rows]


def make_views_parser_lists(event_db):
    """
    Return a list of views, one for each parser, using templated view
    functions.
    """
    parser_names = get_parser_list(event_db)

    parser_view_map_function = \
    '''
    function(doc) {
        if (doc.parser_name == "%(parser_name)s") {
            emit(doc.parser_name, doc)
        }
    }
    '''

    views = []
    for name in parser_names:
        templated = parser_view_map_function % {'parser_name': name}
        views.append(ViewDefinition('api', name, templated))
    return views
gpl-3.0
Deepakpatle/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/createbug.py
125
2563
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options


class CreateBug(AbstractStep):
    """Tool step that files a new Bugzilla bug for the current change."""

    @classmethod
    def options(cls):
        return AbstractStep.options() + [
            Options.cc,
            Options.component,
            Options.blocks,
        ]

    def run(self, state):
        # No need to create a bug if we already have one.
        if state.get("bug_id"):
            return
        # Command-line options win; fall back to values carried in state.
        cc = self._options.cc or state.get("bug_cc")
        blocks = self._options.blocks or state.get("bug_blocked")
        state["bug_id"] = self._tool.bugs.create_bug(
            state["bug_title"],
            state["bug_description"],
            blocked=blocks,
            component=self._options.component,
            cc=cc)
        if blocks:
            # If the blocked bug was already resolved, re-open it so the
            # dependency is visible.
            status = self._tool.bugs.fetch_bug(blocks).status()
            if status == 'RESOLVED':
                self._tool.bugs.reopen_bug(
                    blocks,
                    "Re-opened since this is blocked by bug %s" % state["bug_id"])
bsd-3-clause
antsmc2/mics
survey/tests/services/test_simple_indicator_service.py
2
12529
from random import randint
from rapidsms.contrib.locations.models import LocationType, Location
from survey.models import Backend, Investigator, QuestionModule, Question, QuestionOption, Indicator, Formula, Household, HouseholdHead, Batch, MultiChoiceAnswer, HouseholdMemberGroup, Survey, GroupCondition, EnumerationArea
from survey.services.simple_indicator_service import SimpleIndicatorService
from survey.tests.base_test import BaseTest


class MultiChoiceQuestionSimpleIndicatorServiceTest(BaseTest):
    """SimpleIndicatorService tests for a formula that counts answers to a
    multi-choice (Yes/No) question, aggregated under a parent location."""

    def setUp(self):
        # Location hierarchy: Uganda -> {CENTRAL -> Kampala, WEST -> Mbarara},
        # with one enumeration area and one investigator per district.
        self.survey = Survey.objects.create(name="haha")
        self.batch = Batch.objects.create(order=1)
        self.country = LocationType.objects.create(name="Country", slug="country")
        self.region = LocationType.objects.create(name="Region", slug="region")
        self.district = LocationType.objects.create(name="District", slug='district')
        self.uganda = Location.objects.create(name="Uganda", type=self.country)
        self.west = Location.objects.create(name="WEST", type=self.region, tree_parent=self.uganda)
        self.central = Location.objects.create(name="CENTRAL", type=self.region, tree_parent=self.uganda)
        self.kampala = Location.objects.create(name="Kampala", tree_parent=self.central, type=self.district)
        self.mbarara = Location.objects.create(name="Mbarara", tree_parent=self.west, type=self.district)
        self.ea = EnumerationArea.objects.create(name="EA2", survey=self.survey)
        self.ea.locations.add(self.kampala)
        self.mbarara_ea = EnumerationArea.objects.create(name="EA3", survey=self.survey)
        self.mbarara_ea.locations.add(self.mbarara)
        backend = Backend.objects.create(name='something')
        self.investigator = Investigator.objects.create(name="Investigator 1", mobile_number="1",
                                                        ea=self.ea, backend=backend)
        self.investigator_2 = Investigator.objects.create(name="Investigator 1", mobile_number="33331",
                                                          ea=self.mbarara_ea, backend=backend)
        health_module = QuestionModule.objects.create(name="Health")
        member_group = HouseholdMemberGroup.objects.create(name="Greater than 2 years", order=1)
        # Yes/No multi-choice question whose answers the indicator formula counts.
        self.question_3 = Question.objects.create(text="This is a question",
                                                  answer_type=Question.MULTICHOICE, order=3,
                                                  module=health_module, group=member_group)
        self.yes_option = QuestionOption.objects.create(question=self.question_3, text="Yes", order=1)
        self.no_option = QuestionOption.objects.create(question=self.question_3, text="No", order=2)
        self.question_3.batches.add(self.batch)
        self.indicator = Indicator.objects.create(name="indicator name", description="rajni indicator",
                                                  measure='Percentage', batch=self.batch, module=health_module)
        self.formula = Formula.objects.create(count=self.question_3, indicator=self.indicator)
        # Five household heads under the Kampala investigator, four under Mbarara's.
        self.household_head_1 = self.create_household_head(0, self.investigator)
        self.household_head_2 = self.create_household_head(1, self.investigator)
        self.household_head_3 = self.create_household_head(2, self.investigator)
        self.household_head_4 = self.create_household_head(3, self.investigator)
        self.household_head_5 = self.create_household_head(4, self.investigator)
        self.household_head_6 = self.create_household_head(5, self.investigator_2)
        self.household_head_7 = self.create_household_head(6, self.investigator_2)
        self.household_head_8 = self.create_household_head(7, self.investigator_2)
        self.household_head_9 = self.create_household_head(8, self.investigator_2)

    def test_gets_location_names_and_data_series_for_a_parent_location_and_formula(self):
        # Kampala (CENTRAL): 3 Yes / 2 No.  Mbarara (WEST): 2 Yes / 2 No.
        self.investigator.member_answered(self.question_3, self.household_head_1, self.yes_option.order, self.batch)
        self.investigator.member_answered(self.question_3, self.household_head_2, self.yes_option.order, self.batch)
        self.investigator.member_answered(self.question_3, self.household_head_3, self.yes_option.order, self.batch)
        self.investigator.member_answered(self.question_3, self.household_head_4, self.no_option.order, self.batch)
        self.investigator.member_answered(self.question_3, self.household_head_5, self.no_option.order, self.batch)
        self.investigator_2.member_answered(self.question_3, self.household_head_6, self.yes_option.order, self.batch)
        self.investigator_2.member_answered(self.question_3, self.household_head_7, self.yes_option.order, self.batch)
        self.investigator_2.member_answered(self.question_3, self.household_head_8, self.no_option.order, self.batch)
        self.investigator_2.member_answered(self.question_3, self.household_head_9, self.no_option.order, self.batch)
        simple_indicator_service = SimpleIndicatorService(self.formula, self.uganda)
        # One series per option, one data point per child region of Uganda.
        region_responses = [{'data': [3, 2], 'name': self.yes_option.text},
                            {'data': [2, 2], 'name': self.no_option.text}]
        data_series, location = simple_indicator_service.get_location_names_and_data_series()
        self.assertEquals([self.central.name, self.west.name], location)
        self.assertEquals(region_responses, data_series)

    def test_formats_details_data(self):
        # Same answer distribution as the test above.
        self.investigator.member_answered(self.question_3, self.household_head_1, self.yes_option.order, self.batch)
        self.investigator.member_answered(self.question_3, self.household_head_2, self.yes_option.order, self.batch)
        self.investigator.member_answered(self.question_3, self.household_head_3, self.yes_option.order, self.batch)
        self.investigator.member_answered(self.question_3, self.household_head_4, self.no_option.order, self.batch)
        self.investigator.member_answered(self.question_3, self.household_head_5, self.no_option.order, self.batch)
        self.investigator_2.member_answered(self.question_3, self.household_head_6, self.yes_option.order, self.batch)
        self.investigator_2.member_answered(self.question_3, self.household_head_7, self.yes_option.order, self.batch)
        self.investigator_2.member_answered(self.question_3, self.household_head_8, self.no_option.order, self.batch)
        self.investigator_2.member_answered(self.question_3, self.household_head_9, self.no_option.order, self.batch)
        # Districts with no answers at all must still appear with zero counts.
        kibungo = Location.objects.create(name="Kibungo", type=self.district, tree_parent=self.west)
        mpigi = Location.objects.create(name="Mpigi", type=self.district, tree_parent=self.central)
        table_row_1 = {'Region': self.central.name, 'District': self.kampala.name,
                       self.yes_option.text: 3, self.no_option.text: 2, 'Total': 5}
        table_row_2 = {'Region': self.central.name, 'District': mpigi.name,
                       self.yes_option.text: 0, self.no_option.text: 0, 'Total': 0}
        table_row_3 = {'Region': self.west.name, 'District': kibungo.name,
                       self.yes_option.text: 0, self.no_option.text: 0, 'Total': 0}
        table_row_4 = {'Region': self.west.name, 'District': self.mbarara.name,
                       self.yes_option.text: 2, self.no_option.text: 2, 'Total': 4}
        expected_table_data = [table_row_1, table_row_2, table_row_3, table_row_4]
        simple_indicator_service = SimpleIndicatorService(self.formula, self.uganda)
        tabulated_data = simple_indicator_service.tabulated_data_series()
        self.assertEqual(4, len(tabulated_data))
        for i in range(4):
            self.assertEqual(expected_table_data[i], tabulated_data[i])


class GroupCountSimpleIndicatorServiceTest(BaseTest):
    """SimpleIndicatorService tests for a formula that counts household
    members belonging to a member group (here: household heads)."""

    def setUp(self):
        # Same Uganda -> {CENTRAL -> Kampala, WEST -> Mbarara} fixture as above,
        # but the batch belongs to the survey and households carry the survey.
        self.survey = Survey.objects.create(name="haha")
        self.batch = Batch.objects.create(order=1, survey=self.survey)
        self.country = LocationType.objects.create(name="Country", slug="country")
        self.region = LocationType.objects.create(name="Region", slug="region")
        self.district = LocationType.objects.create(name="District", slug='district')
        self.uganda = Location.objects.create(name="Uganda", type=self.country)
        self.west = Location.objects.create(name="WEST", type=self.region, tree_parent=self.uganda)
        self.central = Location.objects.create(name="CENTRAL", type=self.region, tree_parent=self.uganda)
        self.kampala = Location.objects.create(name="Kampala", tree_parent=self.central, type=self.district)
        self.mbarara = Location.objects.create(name="Mbarara", tree_parent=self.west, type=self.district)
        self.ea = EnumerationArea.objects.create(name="EA2", survey=self.survey)
        self.ea.locations.add(self.kampala)
        self.mbarara_ea = EnumerationArea.objects.create(name="EA3", survey=self.survey)
        self.mbarara_ea.locations.add(self.mbarara)
        backend = Backend.objects.create(name='something')
        health_module = QuestionModule.objects.create(name="Health")
        self.investigator = Investigator.objects.create(name="Investigator 1", mobile_number="1",
                                                        ea=self.ea, backend=backend)
        self.investigator_2 = Investigator.objects.create(name="Investigator 1", mobile_number="33331",
                                                          ea=self.mbarara_ea, backend=backend)
        # Group matched by every household head; the formula counts its members.
        self.general_group = HouseholdMemberGroup.objects.create(name="GENERAL", order=2)
        general_condition = GroupCondition.objects.create(attribute="GENERAL", value="HEAD", condition='EQUALS')
        self.general_group.conditions.add(general_condition)
        # Five heads in Kampala, four in Mbarara.
        self.household_head_1 = self.create_household_head(0, self.investigator, survey=self.survey)
        self.household_head_2 = self.create_household_head(1, self.investigator, survey=self.survey)
        self.household_head_3 = self.create_household_head(2, self.investigator, survey=self.survey)
        self.household_head_4 = self.create_household_head(3, self.investigator, survey=self.survey)
        self.household_head_5 = self.create_household_head(4, self.investigator, survey=self.survey)
        self.household_head_6 = self.create_household_head(5, self.investigator_2, survey=self.survey)
        self.household_head_7 = self.create_household_head(6, self.investigator_2, survey=self.survey)
        self.household_head_8 = self.create_household_head(7, self.investigator_2, survey=self.survey)
        self.household_head_9 = self.create_household_head(8, self.investigator_2, survey=self.survey)
        self.indicator = Indicator.objects.create(name="indicator name", description="rajni indicator",
                                                  measure='Percentage', batch=self.batch, module=health_module)
        self.formula = Formula.objects.create(groups=self.general_group, indicator=self.indicator)

    def test_gets_location_names_and_data_series_for_a_parent_location_and_formula(self):
        simple_indicator_service = SimpleIndicatorService(self.formula, self.uganda)
        # One series for the group: 5 heads under CENTRAL, 4 under WEST.
        region_responses = [{'data': [5, 4], 'name': self.general_group.name}]
        data_series, location = simple_indicator_service.get_location_names_and_data_series()
        self.assertEquals([self.central.name, self.west.name], location)
        self.assertEquals(region_responses, data_series)

    def test_formats_details_data(self):
        # Districts with no households must still appear with zero counts.
        kibungo = Location.objects.create(name="Kibungo", type=self.district, tree_parent=self.west)
        mpigi = Location.objects.create(name="Mpigi", type=self.district, tree_parent=self.central)
        table_row_1 = {'Region': self.central.name, 'District': self.kampala.name,
                       self.general_group.name: 5, 'Total': 5}
        table_row_2 = {'Region': self.central.name, 'District': mpigi.name,
                       self.general_group.name: 0, 'Total': 0}
        table_row_3 = {'Region': self.west.name, 'District': kibungo.name,
                       self.general_group.name: 0, 'Total': 0}
        table_row_4 = {'Region': self.west.name, 'District': self.mbarara.name,
                       self.general_group.name: 4, 'Total': 4}
        expected_table_data = [table_row_1, table_row_2, table_row_3, table_row_4]
        simple_indicator_service = SimpleIndicatorService(self.formula, self.uganda)
        tabulated_data = simple_indicator_service.tabulated_data_series()
        self.assertEqual(4, len(tabulated_data))
        for i in range(4):
            self.assertEqual(expected_table_data[i], tabulated_data[i])
bsd-3-clause
phobson/statsmodels
statsmodels/datasets/randhie/data.py
3
2650
"""RAND Health Insurance Experiment Data""" __docformat__ = 'restructuredtext' COPYRIGHT = """This is in the public domain.""" TITLE = __doc__ SOURCE = """ The data was collected by the RAND corporation as part of the Health Insurance Experiment (HIE). http://www.rand.org/health/projects/hie.html This data was used in:: Cameron, A.C. amd Trivedi, P.K. 2005. `Microeconometrics: Methods and Applications,` Cambridge: New York. And was obtained from: <http://cameron.econ.ucdavis.edu/mmabook/mmadata.html> See randhie/src for the original data and description. The data included here contains only a subset of the original data. The data varies slightly compared to that reported in Cameron and Trivedi. """ DESCRSHORT = """The RAND Co. Health Insurance Experiment Data""" DESCRLONG = """""" NOTE = """:: Number of observations - 20,190 Number of variables - 10 Variable name definitions:: mdvis - Number of outpatient visits to an MD lncoins - ln(coinsurance + 1), 0 <= coninsurance <= 100 idp - 1 if individual deductible plan, 0 otherwise lpi - ln(max(1, annual participation incentive payment)) fmde - 0 if idp = 1; ln(max(1, MDE/(0.01 coinsurance))) otherwise physlm - 1 if the person has a physical limitation disea - number of chronic diseases hlthg - 1 if self-rated health is good hlthf - 1 if self-rated health is fair hlthp - 1 if self-rated health is poor (Omitted category is excellent self-rated health) """ from numpy import recfromtxt, column_stack, array from statsmodels.datasets import utils as du from os.path import dirname, abspath PATH = '%s/%s' % (dirname(abspath(__file__)), 'randhie.csv') def load(): """ Loads the RAND HIE data and returns a Dataset class. ---------- endog - response variable, mdvis exog - design Returns Load instance: a class of the data with array attrbutes 'endog' and 'exog' """ data = _get_data() return du.process_recarray(data, endog_idx=0, dtype=float) def load_pandas(): """ Loads the RAND HIE data and returns a Dataset class. 
---------- endog - response variable, mdvis exog - design Returns Load instance: a class of the data with array attrbutes 'endog' and 'exog' """ from pandas import read_csv data = read_csv(PATH) return du.process_recarray_pandas(data, endog_idx=0) def _get_data(): with open(PATH, "rb") as f: data = recfromtxt(f, delimiter=",", names=True, dtype=float) return data
bsd-3-clause
arnedesmedt/dotfiles
.config/sublime-text-3/Packages.symlinkfollow/pygments/all/pygments/lexers/pawn.py
47
8094
# -*- coding: utf-8 -*-
"""
    pygments.lexers.pawn
    ~~~~~~~~~~~~~~~~~~~~

    Lexers for the Pawn languages.

    :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

from pygments.lexer import RegexLexer
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
    Number, Punctuation, Error
from pygments.util import get_bool_opt

__all__ = ['SourcePawnLexer', 'PawnLexer']


class SourcePawnLexer(RegexLexer):
    """
    For SourcePawn source code with preprocessor directives.

    .. versionadded:: 1.6
    """
    name = 'SourcePawn'
    aliases = ['sp']
    filenames = ['*.sp']
    mimetypes = ['text/x-sourcepawn']

    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/\*.*?\*/)+'
    #: only one /* */ style comment
    _ws1 = r'\s*(?:/[*].*?[*]/\s*)*'

    tokens = {
        'root': [
            # preprocessor directives: without whitespace
            ('^#if\s+0', Comment.Preproc, 'if0'),
            ('^#', Comment.Preproc, 'macro'),
            # or with whitespace
            ('^' + _ws1 + r'#if\s+0', Comment.Preproc, 'if0'),
            ('^' + _ws1 + '#', Comment.Preproc, 'macro'),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),  # line continuation
            (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
            (r'[{}]', Punctuation),
            (r'L?"', String, 'string'),
            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
            # float literals before integer literals so the longest match wins
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
            (r'0[0-7]+[LlUu]*', Number.Oct),
            (r'\d+[LlUu]*', Number.Integer),
            (r'\*/', Error),  # unmatched close-comment
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\],.;]', Punctuation),
            (r'(case|const|continue|native|'
             r'default|else|enum|for|if|new|operator|'
             r'public|return|sizeof|static|decl|struct|switch)\b', Keyword),
            (r'(bool|Float)\b', Keyword.Type),
            (r'(true|false)\b', Keyword.Constant),
            ('[a-zA-Z_]\w*', Name),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
        ],
        'macro': [
            (r'[^/\n]+', Comment.Preproc),
            (r'/\*(.|\n)*?\*/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),  # escaped newline continues the macro
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'if0': [
            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
            (r'.*?\n', Comment),
        ]
    }

    # Names from the SourceMod scripting API that are highlighted as types
    # when the ``sourcemod`` option is enabled.
    SM_TYPES = set(('Action', 'bool', 'Float', 'Plugin', 'String', 'any',
                    'AdminFlag', 'OverrideType', 'OverrideRule', 'ImmunityType',
                    'GroupId', 'AdminId', 'AdmAccessMode', 'AdminCachePart',
                    'CookieAccess', 'CookieMenu', 'CookieMenuAction', 'NetFlow',
                    'ConVarBounds', 'QueryCookie', 'ReplySource',
                    'ConVarQueryResult', 'ConVarQueryFinished', 'Function',
                    'Action', 'Identity', 'PluginStatus', 'PluginInfo', 'DBResult',
                    'DBBindType', 'DBPriority', 'PropType', 'PropFieldType',
                    'MoveType', 'RenderMode', 'RenderFx', 'EventHookMode',
                    'EventHook', 'FileType', 'FileTimeMode', 'PathType',
                    'ParamType', 'ExecType', 'DialogType', 'Handle', 'KvDataTypes',
                    'NominateResult', 'MapChange', 'MenuStyle', 'MenuAction',
                    'MenuSource', 'RegexError', 'SDKCallType', 'SDKLibrary',
                    'SDKFuncConfSource', 'SDKType', 'SDKPassMethod', 'RayType',
                    'TraceEntityFilter', 'ListenOverride', 'SortOrder', 'SortType',
                    'SortFunc2D', 'APLRes', 'FeatureType', 'FeatureStatus',
                    'SMCResult', 'SMCError', 'TFClassType', 'TFTeam', 'TFCond',
                    'TFResourceType', 'Timer', 'TopMenuAction', 'TopMenuObjectType',
                    'TopMenuPosition', 'TopMenuObject', 'UserMsg'))

    def __init__(self, **options):
        # ``sourcemod`` option (default True): additionally highlight the
        # SourceMod standard types and builtin functions.
        self.smhighlighting = get_bool_opt(options, 'sourcemod', True)

        self._functions = set()
        if self.smhighlighting:
            from pygments.lexers._sourcemod_builtins import FUNCTIONS
            self._functions.update(FUNCTIONS)
        RegexLexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        # Post-process plain Name tokens: promote known SourceMod type names
        # to Keyword.Type and builtin function names to Name.Builtin.
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name:
                if self.smhighlighting:
                    if value in self.SM_TYPES:
                        token = Keyword.Type
                    elif value in self._functions:
                        token = Name.Builtin
            yield index, token, value


class PawnLexer(RegexLexer):
    """
    For Pawn source code.

    .. versionadded:: 2.0
    """

    name = 'Pawn'
    aliases = ['pawn']
    filenames = ['*.p', '*.pwn', '*.inc']
    mimetypes = ['text/x-pawn']

    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/[*][\w\W]*?[*]/)+'
    #: only one /* */ style comment
    _ws1 = r'\s*(?:/[*].*?[*]/\s*)*'

    tokens = {
        'root': [
            # preprocessor directives: without whitespace
            ('^#if\s+0', Comment.Preproc, 'if0'),
            ('^#', Comment.Preproc, 'macro'),
            # or with whitespace
            ('^' + _ws1 + r'#if\s+0', Comment.Preproc, 'if0'),
            ('^' + _ws1 + '#', Comment.Preproc, 'macro'),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),  # line continuation
            (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?\*[\w\W]*?\*(\\\n)?/', Comment.Multiline),
            (r'[{}]', Punctuation),
            (r'L?"', String, 'string'),
            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
            # float literals before integer literals so the longest match wins
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
            (r'0[0-7]+[LlUu]*', Number.Oct),
            (r'\d+[LlUu]*', Number.Integer),
            (r'\*/', Error),  # unmatched close-comment
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\],.;]', Punctuation),
            (r'(switch|case|default|const|new|static|char|continue|break|'
             r'if|else|for|while|do|operator|enum|'
             r'public|return|sizeof|tagof|state|goto)\b', Keyword),
            (r'(bool|Float)\b', Keyword.Type),
            (r'(true|false)\b', Keyword.Constant),
            ('[a-zA-Z_]\w*', Name),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
        ],
        'macro': [
            (r'[^/\n]+', Comment.Preproc),
            (r'/\*(.|\n)*?\*/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),  # escaped newline continues the macro
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'if0': [
            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
            (r'.*?\n', Comment),
        ]
    }
mit
grlee77/nipype
nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py
9
1256
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.filtering.arithmetic import MaskScalarVolume def test_MaskScalarVolume_inputs(): input_map = dict(InputVolume=dict(argstr='%s', position=-3, ), MaskVolume=dict(argstr='%s', position=-2, ), OutputVolume=dict(argstr='%s', hash_files=False, position=-1, ), args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), label=dict(argstr='--label %d', ), replace=dict(argstr='--replace %d', ), terminal_output=dict(nohash=True, ), ) inputs = MaskScalarVolume.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MaskScalarVolume_outputs(): output_map = dict(OutputVolume=dict(position=-1, ), ) outputs = MaskScalarVolume.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value
bsd-3-clause
yiqingj/airflow
airflow/executors/__init__.py
9
1759
# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from airflow import configuration from airflow.executors.base_executor import BaseExecutor from airflow.executors.local_executor import LocalExecutor from airflow.executors.sequential_executor import SequentialExecutor try: from airflow.executors.celery_executor import CeleryExecutor except: pass from airflow.exceptions import AirflowException _EXECUTOR = configuration.get('core', 'EXECUTOR') if _EXECUTOR == 'LocalExecutor': DEFAULT_EXECUTOR = LocalExecutor() elif _EXECUTOR == 'CeleryExecutor': DEFAULT_EXECUTOR = CeleryExecutor() elif _EXECUTOR == 'SequentialExecutor': DEFAULT_EXECUTOR = SequentialExecutor() elif _EXECUTOR == 'MesosExecutor': from airflow.contrib.executors.mesos_executor import MesosExecutor DEFAULT_EXECUTOR = MesosExecutor() else: # Loading plugins from airflow.plugins_manager import executors as _executors for _executor in _executors: globals()[_executor.__name__] = _executor if _EXECUTOR in globals(): DEFAULT_EXECUTOR = globals()[_EXECUTOR]() else: raise AirflowException("Executor {0} not supported.".format(_EXECUTOR)) logging.info("Using executor " + _EXECUTOR)
apache-2.0
gsnbng/erpnext
erpnext/patches/v11_0/move_item_defaults_to_child_table_for_multicompany.py
10
3389
# Copyright (c) 2018, Frappe and Contributors
# License: GNU General Public License v3. See license.txt

from __future__ import unicode_literals
import frappe

def execute():
	'''
	Fields to move from the item to item defaults child table
	[ default_warehouse, buying_cost_center, expense_account, selling_cost_center, income_account ]
	'''
	# Patch already applied (column removed) -> nothing to migrate.
	if not frappe.db.has_column('Item', 'default_warehouse'):
		return

	frappe.reload_doc('stock', 'doctype', 'item_default')
	frappe.reload_doc('stock', 'doctype', 'item')

	companies = frappe.get_all("Company")
	if len(companies) == 1 and not frappe.get_all("Item Default", limit=1):
		# Single-company site with no existing Item Default rows: copy every
		# Item's defaults into the child table in one INSERT...SELECT.
		# NOTE(review): the bare ``except`` below silently ignores any insert
		# failure -- presumably intended as best-effort so the patch never
		# blocks migration; confirm before narrowing it.
		try:
			frappe.db.sql('''
				INSERT INTO `tabItem Default`
					(name, parent, parenttype, parentfield, idx, company, default_warehouse,
					buying_cost_center, selling_cost_center, expense_account, income_account, default_supplier)
				SELECT
					SUBSTRING(SHA2(name,224), 1, 10) as name, name as parent, 'Item' as parenttype,
					'item_defaults' as parentfield, 1 as idx, %s as company, default_warehouse,
					buying_cost_center, selling_cost_center, expense_account, income_account, default_supplier
				FROM `tabItem`;
			''', companies[0].name)
		except:
			pass
	else:
		# Multi-company site (or partial prior migration): derive the company
		# for each default from the linked master record, then bulk-insert.
		item_details = frappe.db.sql(""" SELECT name, default_warehouse,
				buying_cost_center, expense_account, selling_cost_center, income_account
			FROM tabItem
			WHERE name not in (select distinct parent from `tabItem Default`) and ifnull(disabled, 0) = 0""", as_dict=1)

		items_default_data = {}
		for item_data in item_details:
			# [fieldname on Item, doctype the field links to]
			for d in [["default_warehouse", "Warehouse"], ["expense_account", "Account"],
				["income_account", "Account"], ["buying_cost_center", "Cost Center"],
				["selling_cost_center", "Cost Center"]]:
				if item_data.get(d[0]):
					# The linked master record tells us which company the
					# default belongs to.
					company = frappe.get_value(d[1], item_data.get(d[0]), "company", cache=True)

					if item_data.name not in items_default_data:
						items_default_data[item_data.name] = {}

					company_wise_data = items_default_data[item_data.name]

					if company not in company_wise_data:
						company_wise_data[company] = {}

					default_data = company_wise_data[company]
					default_data[d[0]] = item_data.get(d[0])

		to_insert_data = []

		# items_default_data data structure will be as follow
		# {
		# 	'item_code 1': {'company 1': {'default_warehouse': 'Test Warehouse 1'}},
		# 	'item_code 2': {
		# 		'company 1': {'default_warehouse': 'Test Warehouse 1'},
		# 		'company 2': {'default_warehouse': 'Test Warehouse 1'}
		# 	}
		# }
		for item_code, companywise_item_data in items_default_data.items():
			for company, item_default_data in companywise_item_data.items():
				# Tuple order must match the column list of the INSERT below.
				to_insert_data.append((
					frappe.generate_hash("", 10),
					item_code,
					'Item',
					'item_defaults',
					company,
					item_default_data.get('default_warehouse'),
					item_default_data.get('expense_account'),
					item_default_data.get('income_account'),
					item_default_data.get('buying_cost_center'),
					item_default_data.get('selling_cost_center'),
				))

		if to_insert_data:
			# One multi-row INSERT; each %s placeholder expands to a full
			# 10-element row tuple.
			frappe.db.sql('''
				INSERT INTO `tabItem Default`
				(
					`name`, `parent`, `parenttype`, `parentfield`, `company`,
					`default_warehouse`, `expense_account`, `income_account`,
					`buying_cost_center`, `selling_cost_center`
				)
				VALUES {}
			'''.format(', '.join(['%s'] * len(to_insert_data))), tuple(to_insert_data))
agpl-3.0
pr-omethe-us/PyKED
pyked/chemked.py
1
44185
""" Main ChemKED module """ # Standard libraries from os.path import exists from collections import namedtuple from warnings import warn from copy import deepcopy import xml.etree.ElementTree as etree import xml.dom.minidom as minidom from itertools import chain import numpy as np # Local imports from .validation import schema, OurValidator, yaml, Q_ from .converters import datagroup_properties, ReSpecTh_to_ChemKED VolumeHistory = namedtuple('VolumeHistory', ['time', 'volume']) VolumeHistory.__doc__ = 'Time history of the volume in an RCM experiment. Deprecated, to be removed after PyKED 0.4' # noqa: E501 VolumeHistory.time.__doc__ = '(`~numpy.ndarray`): the time during the experiment' VolumeHistory.volume.__doc__ = '(`~numpy.ndarray`): the volume during the experiment' TimeHistory = namedtuple('TimeHistory', ['time', 'quantity', 'type']) TimeHistory.__doc__ = 'Time history of the quantity in an RCM experiment' TimeHistory.time.__doc__ = '(`~numpy.ndarray`): the time during the experiment' TimeHistory.quantity.__doc__ = '(`~numpy.ndarray`): the quantity of interest during the experiment' TimeHistory.type.__doc__ = """\ (`str`): the type of time history represented. 
Possible options are: * volume * temperature * pressure * piston position * light emission * OH emission * absorption """ RCMData = namedtuple( 'RCMData', ['compressed_pressure', 'compressed_temperature', 'compression_time', 'stroke', 'clearance', 'compression_ratio'] ) RCMData.__doc__ = 'Data fields specific to rapid compression machine experiments' RCMData.compressed_pressure.__doc__ = '(`~pint.Quantity`) The pressure at the end of compression' RCMData.compressed_temperature.__doc__ = """\ (`~pint.Quantity`) The temperature at the end of compression""" RCMData.compression_time.__doc__ = '(`~pint.Quantity`) The duration of the compression stroke' RCMData.stroke.__doc__ = '(`~pint.Quantity`) The length of the stroke' RCMData.clearance.__doc__ = """\ (`~pint.Quantity`) The clearance between piston face and end wall at the end of compression""" RCMData.compression_ratio.__doc__ = '(`~pint.Quantity`) The volumetric compression ratio' Reference = namedtuple('Reference', ['volume', 'journal', 'doi', 'authors', 'detail', 'year', 'pages']) Reference.__doc__ = 'Information about the article or report where the data can be found' Reference.volume.__doc__ = '(`str`) The journal volume' Reference.journal.__doc__ = '(`str`) The name of the journal' Reference.doi.__doc__ = '(`str`) The Digital Object Identifier of the article' Reference.authors.__doc__ = '(`list`) The list of authors of the article' Reference.detail.__doc__ = '(`str`) Detail about where the data can be found in the article' Reference.year.__doc__ = '(`str`) The year the article was published' Reference.pages.__doc__ = '(`str`) The pages in the journal where the article was published' Apparatus = namedtuple('Apparatus', ['kind', 'institution', 'facility']) Apparatus.__doc__ = 'Information about the experimental apparatus used to generate the data' Apparatus.kind.__doc__ = '(`str`) The kind of experimental apparatus' Apparatus.institution.__doc__ = '(`str`) The institution where the experiment is located' 
Apparatus.facility.__doc__ = '(`str`) The particular experimental facility at the location' Composition = namedtuple('Composition', 'species_name InChI SMILES atomic_composition amount') Composition.__doc__ = 'Detail of the initial composition of the mixture for the experiment' Composition.species_name.__doc__ = '(`str`) The name of the species' Composition.InChI.__doc__ = '(`str`) The InChI identifier for the species' Composition.SMILES.__doc__ = '(`str`) The SMILES identifier for the species' Composition.atomic_composition.__doc__ = '(`dict`) The atomic composition of the species' Composition.amount.__doc__ = '(`~pint.Quantity`) The amount of this species' class ChemKED(object): """Main ChemKED class. The ChemKED class stores information about the contents of a ChemKED database file. It stores each datapoint associated with the database and provides access the the reference information, versions, and file author. Arguments: yaml_file (`str`, optional): The filename of the YAML database in ChemKED format. dict_input (`dict`, optional): A dictionary with the parsed ouput of YAML file in ChemKED format. skip_validation (`bool`, optional): Whether validation of the ChemKED should be done. Must be supplied as a keyword-argument. Attributes: datapoints (`list`): List of `DataPoint` objects storing each datapoint in the database. reference (`~collections.namedtuple`): Attributes include ``volume``, ``journal``, ``doi``, ``authors``, ``detail``, ``year``, and ``pages`` describing the reference from which the datapoints are derived. apparatus (`~collections.namedtuple`): Attributes include ``kind`` of experimental apparatus, and the ``institution`` and ``facility`` where the experimental apparatus is located. chemked_version (`str`): Version of the ChemKED database schema used in this file. experiment_type (`str`): Type of exeperimental data contained in this database. file_author (`dict`): Information about the author of the ChemKED database file. 
file_version (`str`): Version of the ChemKED database file. _properties (`dict`): Original dictionary read from ChemKED database file, meant for internal use. """ def __init__(self, yaml_file=None, dict_input=None, *, skip_validation=False): if yaml_file is not None: with open(yaml_file, 'r') as f: self._properties = yaml.safe_load(f) elif dict_input is not None: self._properties = dict_input else: raise NameError("ChemKED needs either a YAML filename or dictionary as input.") if not skip_validation: self.validate_yaml(self._properties) self.datapoints = [] for point in self._properties['datapoints']: self.datapoints.append(DataPoint(point)) self.reference = Reference( volume=self._properties['reference'].get('volume'), journal=self._properties['reference'].get('journal'), doi=self._properties['reference'].get('doi'), authors=self._properties['reference'].get('authors'), detail=self._properties['reference'].get('detail'), year=self._properties['reference'].get('year'), pages=self._properties['reference'].get('pages'), ) self.apparatus = Apparatus( kind=self._properties['apparatus'].get('kind'), institution=self._properties['apparatus'].get('institution'), facility=self._properties['apparatus'].get('facility'), ) for prop in ['chemked-version', 'experiment-type', 'file-authors', 'file-version']: setattr(self, prop.replace('-', '_'), self._properties[prop]) @classmethod def from_respecth(cls, filename_xml, file_author='', file_author_orcid=''): """Construct a ChemKED instance directly from a ReSpecTh file. Arguments: filename_xml (`str`): Filename of the ReSpecTh-formatted XML file to be imported file_author (`str`, optional): File author to be added to the list generated from the XML file file_author_orcid (`str`, optional): ORCID for the file author being added to the list of file authors Returns: `ChemKED`: Instance of the `ChemKED` class containing the data in ``filename_xml``. 
Examples: >>> ck = ChemKED.from_respecth('respecth_file.xml') >>> ck = ChemKED.from_respecth('respecth_file.xml', file_author='Bryan W. Weber') >>> ck = ChemKED.from_respecth('respecth_file.xml', file_author='Bryan W. Weber', file_author_orcid='0000-0000-0000-0000') """ properties = ReSpecTh_to_ChemKED(filename_xml, file_author, file_author_orcid, validate=False) return cls(dict_input=properties) def validate_yaml(self, properties): """Validate the parsed YAML file for adherance to the ChemKED format. Arguments: properties (`dict`): Dictionary created from the parsed YAML file Raises: `ValueError`: If the YAML file cannot be validated, a `ValueError` is raised whose string contains the errors that are present. """ validator = OurValidator(schema) if not validator.validate(properties): for key, value in validator.errors.items(): if any(['unallowed value' in v for v in value]): print(('{key} has an illegal value. Allowed values are {values} and are case ' 'sensitive.').format(key=key, values=schema[key]['allowed'])) raise ValueError(validator.errors) def get_dataframe(self, output_columns=None): """Get a Pandas DataFrame of the datapoints in this instance. Arguments: output_columns (`list`, optional): List of strings specifying the columns to include in the output DataFrame. The default is `None`, which outputs all of the columns. Options include (not case sensitive): * ``Temperature`` * ``Pressure`` * ``Ignition Delay`` * ``Composition`` * ``Equivalence Ratio`` * ``Reference`` * ``Apparatus`` * ``Experiment Type`` * ``File Author`` * ``File Version`` * ``ChemKED Version`` In addition, specific fields from the ``Reference`` and ``Apparatus`` attributes can be included by specifying the name after a colon. 
These options are: * ``Reference:Volume`` * ``Reference:Journal`` * ``Reference:DOI`` * ``Reference:Authors`` * ``Reference:Detail`` * ``Reference:Year`` * ``Reference:Pages`` * ``Apparatus:Kind`` * ``Apparatus:Facility`` * ``Apparatus:Institution`` Only the first author is printed when ``Reference`` or ``Reference:Authors`` is selected because the whole author list may be quite long. Note: If the Composition is selected as an output type, the composition specified in the `DataPoint` is used. No attempt is made to convert to a consistent basis; mole fractions will remain mole fractions, mass fractions will remain mass fractions, and mole percent will remain mole percent. Therefore, it is possible to end up with more than one type of composition specification in a given column. However, if the composition is included in the resulting dataframe, the type of each composition will be specified by the "Kind" field in each row. Examples: >>> df = ChemKED(yaml_file).get_dataframe() >>> df = ChemKED(yaml_file).get_dataframe(['Temperature', 'Ignition Delay']) Returns: `~pandas.DataFrame`: Contains the information regarding each point in the ``datapoints`` attribute """ import pandas as pd valid_labels = [a.replace('_', ' ') for a in self.__dict__ if not (a.startswith('__') or a.startswith('_')) ] valid_labels.remove('datapoints') valid_labels.extend( ['composition', 'ignition delay', 'temperature', 'pressure', 'equivalence ratio'] ) ref_index = valid_labels.index('reference') valid_labels[ref_index:ref_index + 1] = ['reference:' + a for a in Reference._fields] app_index = valid_labels.index('apparatus') valid_labels[app_index:app_index + 1] = ['apparatus:' + a for a in Apparatus._fields] species_list = list(set(chain(*[list(d.composition.keys()) for d in self.datapoints]))) if output_columns is None or len(output_columns) == 0: col_labels = valid_labels comp_index = col_labels.index('composition') col_labels[comp_index:comp_index + 1] = species_list + ['Composition:Kind'] 
else: output_columns = [a.lower() for a in output_columns] col_labels = [] for col in output_columns: if col in valid_labels or col in ['reference', 'apparatus']: col_labels.append(col) else: raise ValueError('{} is not a valid output column choice'.format(col)) if 'composition' in col_labels: comp_index = col_labels.index('composition') col_labels[comp_index:comp_index + 1] = species_list + ['Composition:Kind'] if 'reference' in col_labels: ref_index = col_labels.index('reference') col_labels[ref_index:ref_index + 1] = ['reference:' + a for a in Reference._fields] if 'apparatus' in col_labels: app_index = col_labels.index('apparatus') col_labels[app_index:app_index + 1] = ['apparatus:' + a for a in Apparatus._fields] data = [] for d in self.datapoints: row = [] d_species = list(d.composition.keys()) for col in col_labels: if col in species_list: if col in d_species: row.append(d.composition[col].amount) else: row.append(Q_(0.0, 'dimensionless')) elif 'reference' in col or 'apparatus' in col: split_col = col.split(':') if split_col[1] == 'authors': row.append(getattr(getattr(self, split_col[0]), split_col[1])[0]['name']) else: row.append(getattr(getattr(self, split_col[0]), split_col[1])) elif col in ['temperature', 'pressure', 'ignition delay', 'equivalence ratio']: row.append(getattr(d, col.replace(' ', '_'))) elif col == 'file authors': row.append(getattr(self, col.replace(' ', '_'))[0]['name']) elif col == 'Composition:Kind': row.append(d.composition_type) else: row.append(getattr(self, col.replace(' ', '_'))) data.append(row) col_labels = [a.title() for a in col_labels] columns = pd.Index(col_labels) return pd.DataFrame(data=data, columns=columns) def write_file(self, filename, *, overwrite=False): """Write new ChemKED YAML file based on object. Arguments: filename (`str`): Filename for target YAML file overwrite (`bool`, optional): Whether to overwrite file with given name if present. Must be supplied as a keyword-argument. 
Raises: `NameError`: If ``filename`` is already present, and ``overwrite`` is not ``True``. Example: >>> dataset = ChemKED(yaml_file) >>> dataset.write_file(new_yaml_file) """ # Ensure file isn't already present if exists(filename) and not overwrite: raise OSError(filename + ' already present. Specify "overwrite=True" ' 'to overwrite, or rename.' ) with open(filename, 'w') as yaml_file: yaml.dump(self._properties, yaml_file) def convert_to_ReSpecTh(self, filename): """Convert ChemKED record to ReSpecTh XML file. This converter uses common information in a ChemKED file to generate a ReSpecTh XML file. Note that some information may be lost, as ChemKED stores some additional attributes. Arguments: filename (`str`): Filename for output ReSpecTh XML file. Example: >>> dataset = ChemKED(yaml_file) >>> dataset.convert_to_ReSpecTh(xml_file) """ root = etree.Element('experiment') file_author = etree.SubElement(root, 'fileAuthor') file_author.text = self.file_authors[0]['name'] # right now ChemKED just uses an integer file version file_version = etree.SubElement(root, 'fileVersion') major_version = etree.SubElement(file_version, 'major') major_version.text = str(self.file_version) minor_version = etree.SubElement(file_version, 'minor') minor_version.text = '0' respecth_version = etree.SubElement(root, 'ReSpecThVersion') major_version = etree.SubElement(respecth_version, 'major') major_version.text = '1' minor_version = etree.SubElement(respecth_version, 'minor') minor_version.text = '0' # Only ignition delay currently supported exp = etree.SubElement(root, 'experimentType') if self.experiment_type == 'ignition delay': exp.text = 'Ignition delay measurement' else: raise NotImplementedError('Only ignition delay type supported for conversion.') reference = etree.SubElement(root, 'bibliographyLink') citation = '' for author in self.reference.authors: citation += author['name'] + ', ' citation += (self.reference.journal + ' (' + str(self.reference.year) + ') ' + 
str(self.reference.volume) + ':' + self.reference.pages + '. ' + self.reference.detail ) reference.set('preferredKey', citation) reference.set('doi', self.reference.doi) apparatus = etree.SubElement(root, 'apparatus') kind = etree.SubElement(apparatus, 'kind') kind.text = self.apparatus.kind common_properties = etree.SubElement(root, 'commonProperties') # ChemKED objects have no common properties once loaded. Check for properties # among datapoints that tend to be common common = [] composition = self.datapoints[0].composition # Composition type *has* to be the same composition_type = self.datapoints[0].composition_type if not all(dp.composition_type == composition_type for dp in self.datapoints): raise NotImplementedError('Error: ReSpecTh does not support varying composition ' 'type among datapoints.' ) if all([composition == dp.composition for dp in self.datapoints]): # initial composition is common common.append('composition') prop = etree.SubElement(common_properties, 'property') prop.set('name', 'initial composition') for species_name, species in composition.items(): component = etree.SubElement(prop, 'component') species_link = etree.SubElement(component, 'speciesLink') species_link.set('preferredKey', species_name) if species.InChI is not None: species_link.set('InChI', species.InChI) amount = etree.SubElement(component, 'amount') amount.set('units', composition_type) amount.text = str(species.amount.magnitude) # If multiple datapoints present, then find any common properties. If only # one datapoint, then composition should be the only "common" property. 
if len(self.datapoints) > 1: for prop_name in datagroup_properties: attribute = prop_name.replace(' ', '_') quantities = [getattr(dp, attribute, False) for dp in self.datapoints] # All quantities must have the property in question and all the # values must be equal if all(quantities) and quantities.count(quantities[0]) == len(quantities): common.append(prop_name) prop = etree.SubElement(common_properties, 'property') prop.set('description', '') prop.set('name', prop_name) prop.set('units', str(quantities[0].units)) value = etree.SubElement(prop, 'value') value.text = str(quantities[0].magnitude) # Ignition delay can't be common, unless only a single datapoint. datagroup = etree.SubElement(root, 'dataGroup') datagroup.set('id', 'dg1') datagroup_link = etree.SubElement(datagroup, 'dataGroupLink') datagroup_link.set('dataGroupID', '') datagroup_link.set('dataPointID', '') property_idx = {} labels = {'temperature': 'T', 'pressure': 'P', 'ignition delay': 'tau', 'pressure rise': 'dP/dt', } for prop_name in datagroup_properties: attribute = prop_name.replace(' ', '_') # This can't be hasattr because properties are set to the value None # if no value is specified in the file, so the attribute always exists prop_indices = [i for i, dp in enumerate(self.datapoints) if getattr(dp, attribute) is not None ] if prop_name in common or not prop_indices: continue prop = etree.SubElement(datagroup, 'property') prop.set('description', '') prop.set('name', prop_name) units = str(getattr(self.datapoints[prop_indices[0]], attribute).units) prop.set('units', units) idx = 'x{}'.format(len(property_idx) + 1) property_idx[idx] = {'name': prop_name, 'units': units} prop.set('id', idx) prop.set('label', labels[prop_name]) # Need to handle datapoints with possibly different species in the initial composition if 'composition' not in common: for dp in self.datapoints: for species in dp.composition.values(): # Only add new property for species not already considered has_spec = 
any([species.species_name in d.values() for d in property_idx.values() ]) if not has_spec: prop = etree.SubElement(datagroup, 'property') prop.set('description', '') idx = 'x{}'.format(len(property_idx) + 1) property_idx[idx] = {'name': species.species_name} prop.set('id', idx) prop.set('label', '[' + species.species_name + ']') prop.set('name', 'composition') prop.set('units', self.datapoints[0].composition_type) species_link = etree.SubElement(prop, 'speciesLink') species_link.set('preferredKey', species.species_name) if species.InChI is not None: species_link.set('InChI', species.InChI) for dp in self.datapoints: datapoint = etree.SubElement(datagroup, 'dataPoint') for idx, val in property_idx.items(): # handle regular properties a bit differently than composition if val['name'] in datagroup_properties: value = etree.SubElement(datapoint, idx) quantity = getattr(dp, val['name'].replace(' ', '_')).to(val['units']) value.text = str(quantity.magnitude) else: # composition for item in dp.composition.values(): if item.species_name == val['name']: value = etree.SubElement(datapoint, idx) value.text = str(item.amount.magnitude) # See https://stackoverflow.com/a/16097112 for the None.__ne__ history_types = ['volume_history', 'temperature_history', 'pressure_history', 'piston_position_history', 'light_emission_history', 'OH_emission_history', 'absorption_history'] time_histories = [getattr(dp, p) for dp in self.datapoints for p in history_types] time_histories = list(filter(None.__ne__, time_histories)) if len(self.datapoints) > 1 and len(time_histories) > 1: raise NotImplementedError('Error: ReSpecTh files do not support multiple datapoints ' 'with a time history.') elif len(time_histories) > 0: for dg_idx, hist in enumerate(time_histories): if hist.type not in ['volume', 'temperature', 'pressure']: warn('The time-history type {} is not supported by ReSpecTh for ' 'ignition delay experiments'.format(hist.type)) continue datagroup = etree.SubElement(root, 'dataGroup') 
datagroup.set('id', 'dg{}'.format(dg_idx)) datagroup_link = etree.SubElement(datagroup, 'dataGroupLink') datagroup_link.set('dataGroupID', '') datagroup_link.set('dataPointID', '') # Time history has two properties: time and quantity. prop = etree.SubElement(datagroup, 'property') prop.set('description', '') prop.set('name', 'time') prop.set('units', str(hist.time.units)) time_idx = 'x{}'.format(len(property_idx) + 1) property_idx[time_idx] = {'name': 'time'} prop.set('id', time_idx) prop.set('label', 't') prop = etree.SubElement(datagroup, 'property') prop.set('description', '') prop.set('name', hist.type) prop.set('units', str(hist.quantity.units)) quant_idx = 'x{}'.format(len(property_idx) + 1) property_idx[quant_idx] = {'name': hist.type} prop.set('id', quant_idx) prop.set('label', 'V') for time, quantity in zip(hist.time, hist.quantity): datapoint = etree.SubElement(datagroup, 'dataPoint') value = etree.SubElement(datapoint, time_idx) value.text = str(time.magnitude) value = etree.SubElement(datapoint, quant_idx) value.text = str(quantity.magnitude) ign_types = [getattr(dp, 'ignition_type', False) for dp in self.datapoints] # All datapoints must have the same ignition target and type if all(ign_types) and ign_types.count(ign_types[0]) == len(ign_types): # In ReSpecTh files all datapoints must share ignition type ignition = etree.SubElement(root, 'ignitionType') if ign_types[0]['target'] in ['pressure', 'temperature']: ignition.set('target', ign_types[0]['target'][0].upper()) else: # options left are species ignition.set('target', self.datapoints[0].ignition_type['target']) if ign_types[0]['type'] == 'd/dt max extrapolated': ignition.set('type', 'baseline max intercept from d/dt') else: ignition.set('type', self.datapoints[0].ignition_type['type']) else: raise NotImplementedError('Different ignition targets or types for multiple datapoints ' 'are not supported in ReSpecTh.') et = etree.ElementTree(root) et.write(filename, encoding='utf-8', xml_declaration=True) 
# now do a "pretty" rewrite xml = minidom.parse(filename) xml_string = xml.toprettyxml(indent=' ') with open(filename, 'w') as f: f.write(xml_string) print('Converted to ' + filename) class DataPoint(object): """Class for a single datapoint. The `DataPoint` class stores the information associated with a single data point in the dataset parsed from the `ChemKED` YAML input. Arguments: properties (`dict`): Dictionary adhering to the ChemKED format for ``datapoints`` Attributes: composition (`list`): List of dictionaries representing the species and their quantities ignition_delay (pint.Quantity): The ignition delay of the experiment temperature (pint.Quantity): The temperature of the experiment pressure (pint.Quantity): The pressure of the experiment pressure_rise (pint.Quantity, optional): The amount of pressure rise during the induction period of a shock tube experiment. compression_time (pint.Quantity, optional): The compression time for an RCM experiment. compressed_pressure (pint.Quantity, optional): The pressure at the end of compression for an RCM experiment. compressed_temperature (pint.Quantity, optional): The temperature at the end of compression for an RCM experiment. first_stage_ignition_delay (pint.Quantity, optional): The first stage ignition delay of the experiment. compression_time (pint.Quantity, optional): The compression time for an RCM experiment. ignition_type (`dict`): Dictionary with the ignition target and type. volume_history (`~collections.namedtuple`, optional): The volume history of the reactor during an RCM experiment. pressure_history (`~collections.namedtuple`, optional): The pressure history of the reactor during an experiment. temperature_history (`~collections.namedtuple`, optional): The temperature history of the reactor during an experiment. piston_position_history (`~collections.namedtuple`, optional): The piston position history of the reactor during an RCM experiment. 
light_emission_history (`~collections.namedtuple`, optional): The light emission history of the reactor during an experiment. OH_emission_history (`~collections.namedtuple`, optional): The OH emission history of the reactor during an experiment. absorption_history (`~collections.namedtuple`, optional): The absorption history of the reactor during an experiment. """ value_unit_props = [ 'ignition-delay', 'first-stage-ignition-delay', 'temperature', 'pressure', 'pressure-rise', ] rcm_data_props = [ 'compressed-pressure', 'compressed-temperature', 'compression-time', 'stroke', 'clearance', 'compression-ratio' ] def __init__(self, properties): for prop in self.value_unit_props: if prop in properties: quant = self.process_quantity(properties[prop]) setattr(self, prop.replace('-', '_'), quant) else: setattr(self, prop.replace('-', '_'), None) if 'rcm-data' in properties: orig_rcm_data = properties['rcm-data'] rcm_props = {} for prop in self.rcm_data_props: if prop in orig_rcm_data: quant = self.process_quantity(orig_rcm_data[prop]) rcm_props[prop.replace('-', '_')] = quant else: rcm_props[prop.replace('-', '_')] = None self.rcm_data = RCMData(**rcm_props) else: self.rcm_data = None self.composition_type = properties['composition']['kind'] composition = {} for species in properties['composition']['species']: species_name = species['species-name'] amount = self.process_quantity(species['amount']) InChI = species.get('InChI') SMILES = species.get('SMILES') atomic_composition = species.get('atomic-composition') composition[species_name] = Composition( species_name=species_name, InChI=InChI, SMILES=SMILES, atomic_composition=atomic_composition, amount=amount) setattr(self, 'composition', composition) self.equivalence_ratio = properties.get('equivalence-ratio') self.ignition_type = deepcopy(properties.get('ignition-type')) if 'time-histories' in properties and 'volume-history' in properties: raise TypeError('time-histories and volume-history are mutually exclusive') if 
'time-histories' in properties: for hist in properties['time-histories']: if hasattr(self, '{}_history'.format(hist['type'].replace(' ', '_'))): raise ValueError('Each history type may only be specified once. {} was ' 'specified multiple times'.format(hist['type'])) time_col = hist['time']['column'] time_units = hist['time']['units'] quant_col = hist['quantity']['column'] quant_units = hist['quantity']['units'] if isinstance(hist['values'], list): values = np.array(hist['values']) else: # Load the values from a file values = np.genfromtxt(hist['values']['filename'], delimiter=',') time_history = TimeHistory( time=Q_(values[:, time_col], time_units), quantity=Q_(values[:, quant_col], quant_units), type=hist['type'], ) setattr(self, '{}_history'.format(hist['type'].replace(' ', '_')), time_history) if 'volume-history' in properties: warn('The volume-history field should be replaced by time-histories. ' 'volume-history will be removed after PyKED 0.4', DeprecationWarning) time_col = properties['volume-history']['time']['column'] time_units = properties['volume-history']['time']['units'] volume_col = properties['volume-history']['volume']['column'] volume_units = properties['volume-history']['volume']['units'] values = np.array(properties['volume-history']['values']) self.volume_history = VolumeHistory( time=Q_(values[:, time_col], time_units), volume=Q_(values[:, volume_col], volume_units), ) history_types = ['volume', 'temperature', 'pressure', 'piston_position', 'light_emission', 'OH_emission', 'absorption'] for h in history_types: if not hasattr(self, '{}_history'.format(h)): setattr(self, '{}_history'.format(h), None) def process_quantity(self, properties): """Process the uncertainty information from a given quantity and return it """ quant = Q_(properties[0]) if len(properties) > 1: unc = properties[1] uncertainty = unc.get('uncertainty', False) upper_uncertainty = unc.get('upper-uncertainty', False) lower_uncertainty = unc.get('lower-uncertainty', False) 
uncertainty_type = unc.get('uncertainty-type') if uncertainty_type == 'relative': if uncertainty: quant = quant.plus_minus(float(uncertainty), relative=True) elif upper_uncertainty and lower_uncertainty: warn('Asymmetric uncertainties are not supported. The ' 'maximum of lower-uncertainty and upper-uncertainty ' 'has been used as the symmetric uncertainty.') uncertainty = max(float(upper_uncertainty), float(lower_uncertainty)) quant = quant.plus_minus(uncertainty, relative=True) else: raise ValueError('Either "uncertainty" or "upper-uncertainty" and ' '"lower-uncertainty" need to be specified.') elif uncertainty_type == 'absolute': if uncertainty: uncertainty = Q_(uncertainty) quant = quant.plus_minus(uncertainty.to(quant.units).magnitude) elif upper_uncertainty and lower_uncertainty: warn('Asymmetric uncertainties are not supported. The ' 'maximum of lower-uncertainty and upper-uncertainty ' 'has been used as the symmetric uncertainty.') uncertainty = max(Q_(upper_uncertainty), Q_(lower_uncertainty)) quant = quant.plus_minus(uncertainty.to(quant.units).magnitude) else: raise ValueError('Either "uncertainty" or "upper-uncertainty" and ' '"lower-uncertainty" need to be specified.') else: raise ValueError('uncertainty-type must be one of "absolute" or "relative"') return quant def get_cantera_composition_string(self, species_conversion=None): """Get the composition in a string format suitable for input to Cantera. Returns a formatted string no matter the type of composition. As such, this method is not recommended for end users; instead, prefer the `get_cantera_mole_fraction` or `get_cantera_mass_fraction` methods. Arguments: species_conversion (`dict`, optional): Mapping of species identifier to a species name. This argument should be supplied when the name of the species in the ChemKED YAML file does not match the name of the same species in a chemical kinetic mechanism. 
The species identifier (the key of the mapping) can be the name, InChI, or SMILES provided in the ChemKED file, while the value associated with a key should be the desired name in the Cantera format output string. Returns: `str`: String in the ``SPEC:AMT, SPEC:AMT`` format Raises: `ValueError`: If the composition type of the `DataPoint` is not one of ``'mass fraction'``, ``'mole fraction'``, or ``'mole percent'`` """ if self.composition_type in ['mole fraction', 'mass fraction']: factor = 1.0 elif self.composition_type == 'mole percent': factor = 100.0 else: raise ValueError('Unknown composition type: {}'.format(self.composition_type)) if species_conversion is None: comps = ['{!s}:{:.4e}'.format(c.species_name, c.amount.magnitude/factor) for c in self.composition.values()] else: comps = [] for c in self.composition.values(): amount = c.amount.magnitude/factor idents = [getattr(c, s, False) for s in ['species_name', 'InChI', 'SMILES']] present = [i in species_conversion for i in idents] if not any(present): comps.append('{!s}:{:.4e}'.format(c.species_name, amount)) else: if len([i for i in present if i]) > 1: raise ValueError('More than one conversion present for species {}'.format( c.species_name)) ident = idents[present.index(True)] species_replacement_name = species_conversion.pop(ident) comps.append('{!s}:{:.4e}'.format(species_replacement_name, amount)) if len(species_conversion) > 0: raise ValueError('Unknown species in conversion: {}'.format(species_conversion)) return ', '.join(comps) def get_cantera_mole_fraction(self, species_conversion=None): """Get the mole fractions in a string format suitable for input to Cantera. Arguments: species_conversion (`dict`, optional): Mapping of species identifier to a species name. This argument should be supplied when the name of the species in the ChemKED YAML file does not match the name of the same species in a chemical kinetic mechanism. 
The species identifier (the key of the mapping) can be the name, InChI, or SMILES provided in the ChemKED file, while the value associated with a key should be the desired name in the Cantera format output string. Returns: `str`: String of mole fractions in the ``SPEC:AMT, SPEC:AMT`` format Raises: `ValueError`: If the composition type is ``'mass fraction'``, the conversion cannot be done because no molecular weight information is known Examples: >>> dp = DataPoint(properties) >>> dp.get_cantera_mole_fraction() 'H2:4.4400e-03, O2:5.5600e-03, Ar:9.9000e-01' >>> species_conversion = {'H2': 'h2', 'O2': 'o2'} >>> dp.get_cantera_mole_fraction(species_conversion) 'h2:4.4400e-03, o2:5.5600e-03, Ar:9.9000e-01' >>> species_conversion = {'1S/H2/h1H': 'h2', '1S/O2/c1-2': 'o2'} >>> dp.get_cantera_mole_fraction(species_conversion) 'h2:4.4400e-03, o2:5.5600e-03, Ar:9.9000e-01' """ if self.composition_type == 'mass fraction': raise ValueError('Cannot get mole fractions from the given composition.\n' '{}'.format(self.composition)) else: return self.get_cantera_composition_string(species_conversion) def get_cantera_mass_fraction(self, species_conversion=None): """Get the mass fractions in a string format suitable for input to Cantera. Arguments: species_conversion (`dict`, optional): Mapping of species identifier to a species name. This argument should be supplied when the name of the species in the ChemKED YAML file does not match the name of the same species in a chemical kinetic mechanism. The species identifier (the key of the mapping) can be the name, InChI, or SMILES provided in the ChemKED file, while the value associated with a key should be the desired name in the Cantera format output string. 
Returns: `str`: String of mass fractions in the ``SPEC:AMT, SPEC:AMT`` format Raises: `ValueError`: If the composition type is ``'mole fraction'`` or ``'mole percent'``, the conversion cannot be done because no molecular weight information is known Examples: >>> dp = DataPoint(properties) >>> dp.get_cantera_mass_fraction() 'H2:2.2525e-04, O2:4.4775e-03, Ar:9.9530e-01' >>> species_conversion = {'H2': 'h2', 'O2': 'o2'} >>> dp.get_cantera_mass_fraction(species_conversion) 'h2:2.2525e-04, o2:4.4775e-03, Ar:9.9530e-01' >>> species_conversion = {'1S/H2/h1H': 'h2', '1S/O2/c1-2': 'o2'} >>> dp.get_cantera_mass_fraction(species_conversion) 'h2:2.2525e-04, o2:4.4775e-03, Ar:9.9530e-01' """ if self.composition_type in ['mole fraction', 'mole percent']: raise ValueError('Cannot get mass fractions from the given composition.\n' '{}'.format(self.composition) ) else: return self.get_cantera_composition_string(species_conversion)
bsd-3-clause
bjornaa/ladim
ladim/state.py
1
6096
""" Class for the state of the model """ import sys import os import importlib import logging from typing import Any, Dict, Sized # mypy import numpy as np from netCDF4 import Dataset, num2date from .tracker import Tracker from .gridforce import Grid, Forcing # ------------------------ Config = Dict[str, Any] class State(Sized): """The model variables at a given time""" def __init__(self, config: Config, grid: Grid) -> None: logging.info("Initializing the model state") self.timestep = 0 self.timestamp = config["start_time"].astype("datetime64[s]") self.dt = np.timedelta64(config["dt"], "s") self.position_variables = ["X", "Y", "Z"] if "ibm" in config and "variables" in config["ibm"]: self.ibm_variables = config["ibm"]["variables"] else: self.ibm_variables = config.get("ibm_variables", []) self.ibm_forcing = config.get("ibm_forcing", []) self.particle_variables = config["particle_variables"] self.instance_variables = self.position_variables + [ var for var in self.ibm_variables if var not in self.particle_variables ] self.pid = np.array([], dtype=int) for name in self.instance_variables: setattr(self, name, np.array([], dtype=float)) for name in self.particle_variables: setattr(self, name, np.array([], dtype=config["release_dtype"][name])) self.track = Tracker(config) self.dt = config["dt"] if config["ibm_module"]: # Import the module logging.info("Initializing the IBM") sys.path.insert(0, os.getcwd()) ibm_module = importlib.import_module(config["ibm_module"]) # Initiate the IBM object self.ibm = ibm_module.IBM(config) else: self.ibm = None # self.num_particles = len(self.X) self.nnew = 0 # Modify with warm start? 
if config["warm_start_file"]: self.warm_start(config, grid) def __getitem__(self, name: str) -> None: return getattr(self, name) def __setitem__(self, name: str, value: Any) -> None: return setattr(self, name, value) def __len__(self) -> int: return len(getattr(self, "X")) def append(self, new: Dict[str, Any], forcing: Forcing) -> None: """Append new particles to the model state""" nnew = len(new["pid"]) self.pid = np.concatenate((self.pid, new["pid"])) for name in self.instance_variables: if name in new: self[name] = np.concatenate((self[name], new[name])) elif name in self.ibm_forcing: # Take values as Z must be a numpy array self[name] = np.concatenate( (self[name], forcing.field(new["X"], new["Y"], new["Z"].values, name)) ) else: # Initialize to zero self[name] = np.concatenate((self[name], np.zeros(nnew))) self.nnew = nnew def update(self, grid: Grid, forcing: Forcing) -> None: """Update the model state to the next timestep""" # From physics all particles are alive # self.alive = np.ones(len(self), dtype="bool") self.alive = grid.ingrid(self.X, self.Y) self.timestep += 1 self.timestamp += np.timedelta64(self.dt, "s") self.track.move_particles(grid, forcing, self) # logging.info( # "Model time = {}".format(self.timestamp.astype('M8[h]'))) if self.timestamp.astype("int") % 3600 == 0: # New hour logging.info("Model time = {}".format(self.timestamp.astype("M8[h]"))) # Update the IBM if self.ibm: self.ibm.update_ibm(grid, self, forcing) # Extension, allow inactive particles (not moved next time) if "active" in self.ibm_variables: pass # self.active = self.ibm_variables['active'] else: # Default = active self.active = np.ones_like(self.pid) # Surface/bottom boundary conditions # Reflective at surface I = self.Z < 0 self.Z[I] = -self.Z[I] # Keep just above bottom H = grid.sample_depth(self.X, self.Y) I = self.Z > H self.Z[I] = 0.99 * H[I] # Compactify by removing dead particles # Could have a switch to avoid this if no deaths self.pid = self.pid[self.alive] for key 
in self.instance_variables: self[key] = self[key][self.alive] def warm_start(self, config: Config, grid: Grid) -> None: """Perform a warm (re)start""" warm_start_file = config["warm_start_file"] try: f = Dataset(warm_start_file) except FileNotFoundError: logging.critical(f"Can not open warm start file: {warm_start_file}") raise SystemExit(1) logging.info("Reading warm start file") # Using last record in file tvar = f.variables["time"] warm_start_time = np.datetime64(num2date(tvar[-1], tvar.units)) # Not needed anymore, explicitly set in configuration # if warm_start_time != config['start_time']: # print("warm start time = ", warm_start_time) # print("start time = ", config['start_time']) # logging.error("Warm start time and start time differ") # raise SystemExit(1) pstart = f.variables["particle_count"][:-1].sum() pcount = f.variables["particle_count"][-1] self.pid = f.variables["pid"][pstart : pstart + pcount] # Give error if variable not in restart file for var in config["warm_start_variables"]: logging.debug(f"Reading {var} from warm start file") self[var] = f.variables[var][pstart : pstart + pcount] # Remove particles near edge of grid I = grid.ingrid(self["X"], self["Y"]) self.pid = self.pid[I] for var in config["warm_start_variables"]: self[var] = self[var][I]
mit
Anvil/maestro-ng
maestro/loader.py
1
2758
# Copyright (C) 2015 SignalFx, Inc. All rights reserved.
#
# Docker container orchestration utility.

import jinja2
import os
import sys
import yaml

from . import exceptions


class MaestroYamlConstructor(yaml.constructor.Constructor):
    """A PyYAML object constructor that errors on duplicate keys in YAML
    mappings. Because for some reason PyYAML doesn't do that since 3.x."""

    def construct_mapping(self, node, deep=False):
        # Reject anything that is not a mapping node, mirroring the error
        # PyYAML itself raises for malformed documents.
        if not isinstance(node, yaml.nodes.MappingNode):
            raise yaml.constructor.ConstructorError(
                None, None,
                "expected a mapping node, but found %s" % node.id,
                node.start_mark)
        # First pass: construct each key and detect duplicates before
        # delegating actual mapping construction to the base class.
        keys = set()
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            if key in keys:
                raise yaml.constructor.ConstructorError(
                    "while constructing a mapping", node.start_mark,
                    "found duplicate key (%s)" % key, key_node.start_mark)
            keys.add(key)
        return yaml.constructor.Constructor.construct_mapping(self, node, deep)


class MaestroYamlLoader(yaml.reader.Reader, yaml.scanner.Scanner,
                        yaml.parser.Parser, yaml.composer.Composer,
                        MaestroYamlConstructor, yaml.resolver.Resolver):
    """A custom YAML Loader that uses the custom MaestroYamlConstructor."""

    def __init__(self, stream):
        # Same initialization sequence as yaml.Loader.__init__, with our
        # duplicate-key-checking constructor substituted in.
        yaml.reader.Reader.__init__(self, stream)
        yaml.scanner.Scanner.__init__(self)
        yaml.parser.Parser.__init__(self)
        yaml.composer.Composer.__init__(self)
        MaestroYamlConstructor.__init__(self)
        yaml.resolver.Resolver.__init__(self)


def load(filename):
    """Load a config from the given file.

    The file is rendered as a Jinja2 template (with the process
    environment exposed as ``env``) before being parsed as YAML.

    Args:
        filename (string): Path to the YAML environment description
            configuration file to load. Use '-' for stdin.

    Returns:
        A python data structure corresponding to the YAML configuration.

    Raises:
        exceptions.MaestroException: If the file is missing or unreadable.
    """
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(os.path.dirname(filename)),
        extensions=['jinja2.ext.with_'])
    try:
        if filename == '-':
            # Read the whole document from stdin and treat it as a template.
            template = env.from_string(sys.stdin.read())
        else:
            template = env.get_template(os.path.basename(filename))
    except jinja2.exceptions.TemplateNotFound:
        raise exceptions.MaestroException(
            'Environment description file {} not found!'.format(filename))
    except Exception as e:
        # Wrap any other template error in the project's exception type.
        raise exceptions.MaestroException(
            'Error reading environment description file {}: {}!'
            .format(filename, e))
    return yaml.load(template.render(env=os.environ), Loader=MaestroYamlLoader)
apache-2.0
bollu/sandhi
modules/gr36/docs/sphinx/hieroglyph/test/test_comments.py
72
24669
import unittest from hieroglyph.hieroglyph import parse_hieroglyph_text from hieroglyph.errors import HieroglyphError class CommentTests(unittest.TestCase): def test_comment1(self): source = """Fetches rows from a Bigtable. This is a continuation of the opening paragraph. Retrieves rows pertaining to the given keys from the Table instance represented by big_table. Silly things may happen if other_silly_variable is not None. Args: big_table: An open Bigtable Table instance. keys: A sequence of strings representing the key of each table row to fetch. other_silly_variable (str): Another optional variable, that has a much longer name than the other args, and which does nothing. Returns: A dict mapping keys to the corresponding table row data fetched. Each row is represented as a tuple of strings. For example: {'Serak': ('Rigel VII', 'Preparer'), 'Zim': ('Irk', 'Invader'), 'Lrrr': ('Omicron Persei 8', 'Emperor')} If a key from the keys argument is missing from the dictionary, then that row was not found in the table. Raises: IOError: An error occurred accessing the bigtable.Table object. """ expected = """ Fetches rows from a Bigtable. This is a continuation of the opening paragraph. Retrieves rows pertaining to the given keys from the Table instance represented by big_table. Silly things may happen if other_silly_variable is not None. :param big_table: An open Bigtable Table instance. :param keys: A sequence of strings representing the key of each table row to fetch. :param other_silly_variable: Another optional variable, that has a much longer name than the other args, and which does nothing. :type other_silly_variable: str :returns: A dict mapping keys to the corresponding table row data fetched. Each row is represented as a tuple of strings. For example: {'Serak': ('Rigel VII', 'Preparer'), 'Zim': ('Irk', 'Invader'), 'Lrrr': ('Omicron Persei 8', 'Emperor')} If a key from the keys argument is missing from the dictionary, then that row was not found in the table. 
:raises: IOError - An error occurred accessing the bigtable.Table object. """ source_lines = source.splitlines() actual_lines = parse_hieroglyph_text(source_lines) expected_lines = expected.splitlines() self.assertEqual(len(actual_lines), len(expected_lines)) for actual_line, result_line in zip(actual_lines, expected_lines): if len(actual_line.strip()) == 0: self.assertTrue(len(result_line.strip()) == 0) else: self.assertEqual(actual_line, result_line) def test_comment2(self): source = """Determine if all elements in the source sequence satisfy a condition. All of the source sequence will be consumed. Note: This method uses immediate execution. Args: predicate: An optional single argument function used to test each elements. If omitted, the bool() function is used resulting in the elements being tested directly. Returns: True if all elements in the sequence meet the predicate condition, otherwise False. Raises: ValueError: If the Queryable is closed() TypeError: If predicate is not callable. """ expected = """Determine if all elements in the source sequence satisfy a condition. All of the source sequence will be consumed. .. note:: This method uses immediate execution. :param predicate: An optional single argument function used to test each elements. If omitted, the bool() function is used resulting in the elements being tested directly. :returns: True if all elements in the sequence meet the predicate condition, otherwise False. :raises: * ValueError - If the Queryable is closed() * TypeError - If predicate is not callable. 
""" source_lines = source.splitlines() actual_lines = parse_hieroglyph_text(source_lines) expected_lines = expected.splitlines() self.assertEqual(len(actual_lines), len(expected_lines)) for actual_line, result_line in zip(actual_lines, expected_lines): if len(actual_line.strip()) == 0: self.assertTrue(len(result_line.strip()) == 0) else: self.assertEqual(actual_line, result_line) def test_comment3(self): source = """Determine if all elements in the source sequence satisfy a condition. All of the source sequence will be consumed. Note: This method uses immediate execution. Args: predicate: An optional single argument function used to test each elements. If omitted, the bool() function is used resulting in the elements being tested directly. Returns: True if all elements in the sequence meet the predicate condition, otherwise False. Raises: ValueError: If the Queryable is closed() TypeError: If predicate is not callable. """ expected = """Determine if all elements in the source sequence satisfy a condition. All of the source sequence will be consumed. .. note:: This method uses immediate execution. :param predicate: An optional single argument function used to test each elements. If omitted, the bool() function is used resulting in the elements being tested directly. :returns: True if all elements in the sequence meet the predicate condition, otherwise False. :raises: * ValueError - If the Queryable is closed() * TypeError - If predicate is not callable. 
""" source_lines = source.splitlines() actual_lines = parse_hieroglyph_text(source_lines) expected_lines = expected.splitlines() self.assertEqual(len(actual_lines), len(expected_lines)) for actual_line, result_line in zip(actual_lines, expected_lines): if len(actual_line.strip()) == 0: self.assertTrue(len(result_line.strip()) == 0) else: self.assertEqual(actual_line, result_line) def test_comment4(self): source_lines = [u'Determine if all elements in the source sequence satisfy a condition.', u'', u'All of the source sequence will be consumed.', u'', u'Note: This method uses immediate execution.', u'', u'Args:', u' predicate: An optional single argument function used to test each', u' elements. If omitted, the bool() function is used resulting in', u' the elements being tested directly.', u'', u'Returns:', u' True if all elements in the sequence meet the predicate condition,', u' otherwise False.', u'', u'Raises:', u' ValueError: If the Queryable is closed()', u' TypeError: If predicate is not callable.', u''] expected = """Determine if all elements in the source sequence satisfy a condition. All of the source sequence will be consumed. .. note:: This method uses immediate execution. :param predicate: An optional single argument function used to test each elements. If omitted, the bool() function is used resulting in the elements being tested directly. :returns: True if all elements in the sequence meet the predicate condition, otherwise False. :raises: * ValueError - If the Queryable is closed() * TypeError - If predicate is not callable. 
""" actual_lines = parse_hieroglyph_text(source_lines) expected_lines = expected.splitlines() self.assertEqual(len(actual_lines), len(expected_lines)) for actual_line, result_line in zip(actual_lines, expected_lines): if len(actual_line.strip()) == 0: self.assertTrue(len(result_line.strip()) == 0) else: self.assertEqual(actual_line, result_line) def test_comment5(self): source_lines = [u'An empty Queryable.', u'', u'Note: The same empty instance will be returned each time.', u'', u'Returns: A Queryable over an empty sequence.', u''] expected = """An empty Queryable. .. note:: The same empty instance will be returned each time. :returns: A Queryable over an empty sequence. """ actual_lines = parse_hieroglyph_text(source_lines) expected_lines = expected.splitlines() self.assertEqual(len(actual_lines), len(expected_lines)) for actual_line, result_line in zip(actual_lines, expected_lines): if len(actual_line.strip()) == 0: self.assertTrue(len(result_line.strip()) == 0) else: self.assertEqual(actual_line, result_line) def test_comment6(self): source_lines = [u'A convenience factory for creating Records.', u'', u'Args:', u' **kwargs: Each keyword argument will be used to initialise an', u' attribute with the same name as the argument and the given', u' value.', u'', u'Returns:', u' A Record which has a named attribute for each of the keyword arguments.', u''] expected = """A convenience factory for creating Records. :param \*\*kwargs: Each keyword argument will be used to initialise an attribute with the same name as the argument and the given value. :returns: A Record which has a named attribute for each of the keyword arguments. 
""" actual_lines = parse_hieroglyph_text(source_lines) expected_lines = expected.splitlines() self.assertEqual(len(actual_lines), len(expected_lines)) for actual_line, result_line in zip(actual_lines, expected_lines): if len(actual_line.strip()) == 0: self.assertTrue(len(result_line.strip()) == 0) else: self.assertEqual(actual_line, result_line) def test_comment7(self): source = """Projects each element of a sequence to an intermediate new sequence, flattens the resulting sequences into one sequence and optionally transforms the flattened sequence using a selector function. Note: This method uses deferred execution. Args: collection_selector: A unary function mapping each element of the source iterable into an intermediate sequence. The single argument of the collection_selector is the value of an element from the source sequence. The return value should be an iterable derived from that element value. The default collection_selector, which is the identity function, assumes that each element of the source sequence is itself iterable. result_selector: An optional unary function mapping the elements in the flattened intermediate sequence to corresponding elements of the result sequence. The single argument of the result_selector is the value of an element from the flattened intermediate sequence. The return value should be the corresponding value in the result sequence. The default result_selector is the identity function. Returns: A Queryable over a generated sequence whose elements are the result of applying the one-to-many collection_selector to each element of the source sequence, concatenating the results into an intermediate sequence, and then mapping each of those elements through the result_selector into the result sequence. Raises: ValueError: If this Queryable has been closed. TypeError: If either collection_selector or result_selector are not callable. 
""" expected = """ Projects each element of a sequence to an intermediate new sequence, flattens the resulting sequences into one sequence and optionally transforms the flattened sequence using a selector function. .. note:: This method uses deferred execution. :param collection_selector: A unary function mapping each element of the source iterable into an intermediate sequence. The single argument of the collection_selector is the value of an element from the source sequence. The return value should be an iterable derived from that element value. The default collection_selector, which is the identity function, assumes that each element of the source sequence is itself iterable. :param result_selector: An optional unary function mapping the elements in the flattened intermediate sequence to corresponding elements of the result sequence. The single argument of the result_selector is the value of an element from the flattened intermediate sequence. The return value should be the corresponding value in the result sequence. The default result_selector is the identity function. :returns: A Queryable over a generated sequence whose elements are the result of applying the one-to-many collection_selector to each element of the source sequence, concatenating the results into an intermediate sequence, and then mapping each of those elements through the result_selector into the result sequence. :raises: * ValueError - If this Queryable has been closed. * TypeError - If either collection_selector or result_selector are not callable. 
""" source_lines = source.splitlines() actual_lines = parse_hieroglyph_text(source_lines) expected_lines = expected.splitlines() self.assertEqual(len(actual_lines), len(expected_lines)) for actual_line, result_line in zip(actual_lines, expected_lines): if len(actual_line.strip()) == 0: self.assertTrue(len(result_line.strip()) == 0) else: self.assertEqual(actual_line, result_line) def test_comment8(self): source = """A convenience factory for creating Records. Args: **kwargs: Each keyword argument will be used to initialise an attribute with the same name as the argument and the given value. Returns: A Record which has a named attribute for each of the keyword arguments. """ expected = """A convenience factory for creating Records. :param \*\*kwargs: Each keyword argument will be used to initialise an attribute with the same name as the argument and the given value. :returns: A Record which has a named attribute for each of the keyword arguments. """ source_lines = source.splitlines() actual_lines = parse_hieroglyph_text(source_lines) expected_lines = expected.splitlines() self.assertEqual(len(actual_lines), len(expected_lines)) for actual_line, result_line in zip(actual_lines, expected_lines): if len(actual_line.strip()) == 0: self.assertTrue(len(result_line.strip()) == 0) else: self.assertEqual(actual_line, result_line) def test_comment9(self): source_lines = [u'Parse a single line of a tree to determine depth and node.', u'', u'Args:', u' This line is missing an argument name.', u' ', u'Returns:', u' A 2-tuple containing the tree 0 based tree depth as the first', u' element and the node description as the second element.', u'', u'Raises:', u' ValueError: If line does not have the expected form.', u''] self.assertRaises(HieroglyphError, lambda: parse_hieroglyph_text(source_lines)) def test_comment10(self): source = """ Execute the command described by concatenating the string function arguments with the p4 -s global scripting flag and return the results in a 
dictionary. For example, to run the command:: p4 -s fstat -T depotFile foo.h call:: p4('fstat', '-T', 'depotFile', 'foo.h') Args: args: The arguments to the p4 command as a list of objects which will be converted to strings. Returns: A dictionary of lists where each key in the dictionary is the field name from the command output, and each value is a list of output lines in order. Raises: PerforceError: If the command could not be run or if the command reported an error. """ expected = """ Execute the command described by concatenating the string function arguments with the p4 -s global scripting flag and return the results in a dictionary. For example, to run the command:: p4 -s fstat -T depotFile foo.h call:: p4('fstat', '-T', 'depotFile', 'foo.h') :param args: The arguments to the p4 command as a list of objects which will be converted to strings. :returns: A dictionary of lists where each key in the dictionary is the field name from the command output, and each value is a list of output lines in order. :raises: PerforceError - If the command could not be run or if the command reported an error. """ source_lines = source.splitlines() actual_lines = parse_hieroglyph_text(source_lines) expected_lines = expected.splitlines() self.assertEqual(len(actual_lines), len(expected_lines)) for actual_line, result_line in zip(actual_lines, expected_lines): if len(actual_line.strip()) == 0: self.assertTrue(len(result_line.strip()) == 0) else: self.assertEqual(actual_line, result_line) def test_comment11(self): source = """Projects each element of a sequence to an intermediate new sequence, flattens the resulting sequences into one sequence and optionally transforms the flattened sequence using a selector function. Warning: This method may explode at short notice. Args: collection_selector: A unary function mapping each element of the source iterable into an intermediate sequence. 
The single argument of the collection_selector is the value of an element from the source sequence. The return value should be an iterable derived from that element value. The default collection_selector, which is the identity function, assumes that each element of the source sequence is itself iterable. result_selector: An optional unary function mapping the elements in the flattened intermediate sequence to corresponding elements of the result sequence. The single argument of the result_selector is the value of an element from the flattened intermediate sequence. The return value should be the corresponding value in the result sequence. The default result_selector is the identity function. Returns: A Queryable over a generated sequence whose elements are the result of applying the one-to-many collection_selector to each element of the source sequence, concatenating the results into an intermediate sequence, and then mapping each of those elements through the result_selector into the result sequence. Raises: ValueError: If this Queryable has been closed. TypeError: If either collection_selector or result_selector are not callable. """ expected = """ Projects each element of a sequence to an intermediate new sequence, flattens the resulting sequences into one sequence and optionally transforms the flattened sequence using a selector function. .. warning:: This method may explode at short notice. :param collection_selector: A unary function mapping each element of the source iterable into an intermediate sequence. The single argument of the collection_selector is the value of an element from the source sequence. The return value should be an iterable derived from that element value. The default collection_selector, which is the identity function, assumes that each element of the source sequence is itself iterable. 
:param result_selector: An optional unary function mapping the elements in the flattened intermediate sequence to corresponding elements of the result sequence. The single argument of the result_selector is the value of an element from the flattened intermediate sequence. The return value should be the corresponding value in the result sequence. The default result_selector is the identity function. :returns: A Queryable over a generated sequence whose elements are the result of applying the one-to-many collection_selector to each element of the source sequence, concatenating the results into an intermediate sequence, and then mapping each of those elements through the result_selector into the result sequence. :raises: * ValueError - If this Queryable has been closed. * TypeError - If either collection_selector or result_selector are not callable. """ source_lines = source.splitlines() actual_lines = parse_hieroglyph_text(source_lines) expected_lines = expected.splitlines() self.assertEqual(len(actual_lines), len(expected_lines)) for actual_line, result_line in zip(actual_lines, expected_lines): if len(actual_line.strip()) == 0: self.assertTrue(len(result_line.strip()) == 0) else: self.assertEqual(actual_line, result_line) def test_comment12(self): source = """Determine if all elements in the source sequence satisfy a condition. All of the source sequence will be consumed. Note: This method uses immediate execution. Args: predicate: An optional single argument function used to test each elements. If omitted, the bool() function is used resulting in the elements being tested directly. Returns: True if all elements in the sequence meet the predicate condition, otherwise False. Raises: This is not a proper exception description """ source_lines = source.splitlines() self.assertRaises(HieroglyphError, lambda: parse_hieroglyph_text(source_lines))
gpl-3.0
bootstraponline/testdroid_device_finder
device_finder.py
1
4900
# -*- coding: utf-8 -*-
# from: https://github.com/bitbar/testdroid-samples/blob/03fc043ba98235b9ea46a0ab8646f3b20dd1960e/appium/sample-scripts/python/device_finder.py
#
# Modernized to Python 3: print statements -> print(), removed unused
# py2-only imports (httplib, urlparse, optparse, os, json, datetime),
# fixed mutable default arguments, and attached the stray pre-def string
# literals as real docstrings.
import sys
import time

import requests


class DeviceFinder:
    """Finds free, unlocked devices in Testdroid Cloud via its REST API."""

    # Cloud URL (not including API path)
    url = None
    # OAuth access token
    access_token = None
    # OAuth refresh token
    refresh_token = None
    # Unix timestamp (seconds) when token expires
    token_expiration_time = None

    def __init__(self, username=None, password=None,
                 url="https://cloud.testdroid.com", download_buffer_size=65536):
        """Full constructor with username and password."""
        self.username = username
        self.password = password
        self.cloud_url = url
        self.download_buffer_size = download_buffer_size

    def get_token(self):
        """Get an OAuth2 access token, refreshing it if it has expired.

        Exits the process on authentication failure; falls back to a full
        re-authentication if the refresh token is rejected.
        """
        if not self.access_token:
            # First call: authenticate with username/password.
            url = "%s/oauth/token" % self.cloud_url
            payload = {
                "client_id": "testdroid-cloud-api",
                "grant_type": "password",
                "username": self.username,
                "password": self.password,
            }
            res = requests.post(url, data=payload,
                                headers={"Accept": "application/json"})
            if res.status_code != 200:
                print("FAILED: Authentication or connection failure. Check Testdroid Cloud URL and your credentials.")
                sys.exit(-1)
            reply = res.json()
            self.access_token = reply['access_token']
            self.refresh_token = reply['refresh_token']
            self.token_expiration_time = time.time() + reply['expires_in']
        elif self.token_expiration_time < time.time():
            # Token expired: try the refresh-token grant first.
            url = "%s/oauth/token" % self.cloud_url
            payload = {
                "client_id": "testdroid-cloud-api",
                "grant_type": "refresh_token",
                "refresh_token": self.refresh_token,
            }
            res = requests.post(url, data=payload,
                                headers={"Accept": "application/json"})
            if res.status_code != 200:
                print("FAILED: Unable to get a new access token using refresh token")
                # Drop the stale token and re-authenticate from scratch.
                self.access_token = None
                return self.get_token()
            reply = res.json()
            self.access_token = reply['access_token']
            self.refresh_token = reply['refresh_token']
            self.token_expiration_time = time.time() + reply['expires_in']
        return self.access_token

    def _build_headers(self):
        """Headers for API calls, including Bearer authentication."""
        return {
            "Authorization": "Bearer %s" % self.get_token(),
            "Accept": "application/json",
        }

    def get(self, path=None, payload=None, headers=None):
        """GET from an API resource.

        Returns parsed JSON unless the caller overrides the Accept header,
        in which case the raw response text is returned.
        """
        # Avoid the original's mutable-default-argument bug.
        payload = payload if payload is not None else {}
        # Caller-supplied headers override the defaults, as before.
        merged = dict(self._build_headers())
        merged.update(headers if headers is not None else {})
        # Accept both bare paths and full '.../v2/...' paths.
        if path.find('v2/') >= 0:
            path = path.split('v2/')[1]
        url = "%s/api/v2/%s" % (self.cloud_url, path)
        res = requests.get(url, params=payload, headers=merged)
        if merged['Accept'] == 'application/json':
            return res.json()
        return res.text

    def get_devices(self, limit=0):
        """Returns list of devices (limit=0 means server default)."""
        return self.get("devices?limit=%s" % (limit))

    def available_free_android_device(self, limit=0):
        """Find an available free Android device (API level > 16).

        Returns the device's display name, or "" if none is available.
        """
        print("Searching Available Free Android Device...")
        for device in self.get_devices(limit)['data']:
            if (device['creditsPrice'] == 0 and
                    not device['locked'] and
                    device['osType'] == "ANDROID" and
                    device['softwareVersion']['apiLevel'] > 16):
                print("Found device '%s'" % device['displayName'])
                print("")
                return device['displayName']
        print("No available device found")
        print("")
        return ""

    def available_free_ios_device(self, limit=0):
        """Find an available free iOS device.

        Returns the device's display name, or "" if none is available.
        """
        print("Searching Available Free iOS Device...")
        for device in self.get_devices(limit)['data']:
            if (device['creditsPrice'] == 0 and
                    not device['locked'] and
                    device['osType'] == "IOS"):
                print("Found device '%s'" % device['displayName'])
                print("")
                return device['displayName']
        print("No available device found")
        print("")
        return ""
apache-2.0
eyalfa/spark
examples/src/main/python/ml/logistic_regression_with_elastic_net.py
123
2029
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import print_function # $example on$ from pyspark.ml.classification import LogisticRegression # $example off$ from pyspark.sql import SparkSession if __name__ == "__main__": spark = SparkSession\ .builder\ .appName("LogisticRegressionWithElasticNet")\ .getOrCreate() # $example on$ # Load training data training = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt") lr = LogisticRegression(maxIter=10, regParam=0.3, elasticNetParam=0.8) # Fit the model lrModel = lr.fit(training) # Print the coefficients and intercept for logistic regression print("Coefficients: " + str(lrModel.coefficients)) print("Intercept: " + str(lrModel.intercept)) # We can also use the multinomial family for binary classification mlr = LogisticRegression(maxIter=10, regParam=0.3, elasticNetParam=0.8, family="multinomial") # Fit the model mlrModel = mlr.fit(training) # Print the coefficients and intercepts for logistic regression with multinomial family print("Multinomial coefficients: " + str(mlrModel.coefficientMatrix)) print("Multinomial intercepts: " + str(mlrModel.interceptVector)) # $example off$ spark.stop()
apache-2.0
OTWillems/GEO1005
SpatialDecision/external/networkx/algorithms/centrality/eigenvector.py
20
6965
# coding=utf8
"""
Eigenvector centrality.
"""
#    Copyright (C) 2004-2015 by
#    Aric Hagberg <hagberg@lanl.gov>
#    Dan Schult <dschult@colgate.edu>
#    Pieter Swart <swart@lanl.gov>
#    All rights reserved.
#    BSD license.
import networkx as nx
__author__ = "\n".join(['Aric Hagberg (aric.hagberg@gmail.com)',
                        'Pieter Swart (swart@lanl.gov)',
                        'Sasha Gutfraind (ag362@cornell.edu)'])
__all__ = ['eigenvector_centrality', 'eigenvector_centrality_numpy']


def eigenvector_centrality(G, max_iter=100, tol=1.0e-6, nstart=None,
                           weight='weight'):
    # Raw docstring: it contains LaTeX backslash escapes (\m, \l) which
    # would otherwise be invalid string escapes.
    r"""Compute the eigenvector centrality for the graph G.

    Eigenvector centrality computes the centrality for a node based on the
    centrality of its neighbors. The eigenvector centrality for node `i` is

    .. math::

        \mathbf{Ax} = \lambda \mathbf{x}

    where `A` is the adjacency matrix of the graph G with eigenvalue
    `\lambda`. By virtue of the Perron–Frobenius theorem, there is a unique
    and positive solution if `\lambda` is the largest eigenvalue associated
    with the eigenvector of the adjacency matrix `A` ([2]_).

    Parameters
    ----------
    G : graph
      A networkx graph

    max_iter : integer, optional
      Maximum number of iterations in power method.

    tol : float, optional
      Error tolerance used to check convergence in power method iteration.

    nstart : dictionary, optional
      Starting value of eigenvector iteration for each node.

    weight : None or string, optional
      If None, all edge weights are considered equal.
      Otherwise holds the name of the edge attribute used as weight.

    Returns
    -------
    nodes : dictionary
       Dictionary of nodes with eigenvector centrality as the value.

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> centrality = nx.eigenvector_centrality(G)
    >>> print(['%s %0.2f'%(node,centrality[node]) for node in centrality])
    ['0 0.37', '1 0.60', '2 0.60', '3 0.37']

    See Also
    --------
    eigenvector_centrality_numpy
    pagerank
    hits

    Notes
    -----
    The measure was introduced by [1]_.

    The eigenvector calculation is done by the power iteration method and
    has no guarantee of convergence. The iteration will stop after
    ``max_iter`` iterations or an error tolerance of
    ``number_of_nodes(G)*tol`` has been reached.

    For directed graphs this is "left" eigenvector centrality which
    corresponds to the in-edges in the graph. For out-edges eigenvector
    centrality first reverse the graph with ``G.reverse()``.

    Raises
    ------
    NetworkXException
        If G is a multigraph or empty.
    NetworkXError
        If the power iteration fails to converge within ``max_iter``
        iterations.

    References
    ----------
    .. [1] Phillip Bonacich:
       Power and Centrality: A Family of Measures.
       American Journal of Sociology 92(5):1170–1182, 1986
    .. [2] Mark E. J. Newman:
       Networks: An Introduction.
       Oxford University Press, USA, 2010, pp. 169.
    """
    from math import sqrt
    if type(G) in (nx.MultiGraph, nx.MultiDiGraph):
        raise nx.NetworkXException("Not defined for multigraphs.")

    if len(G) == 0:
        raise nx.NetworkXException("Empty graph.")

    if nstart is None:
        # choose starting vector with entries of 1/len(G)
        x = dict((n, 1.0 / len(G)) for n in G)
    else:
        x = nstart
    # normalize starting vector (L1)
    s = 1.0 / sum(x.values())
    for k in x:
        x[k] *= s
    nnodes = G.number_of_nodes()
    # make up to max_iter iterations
    for i in range(max_iter):
        xlast = x
        x = dict.fromkeys(xlast, 0)
        # do the multiplication y^T = x^T A
        for n in x:
            for nbr in G[n]:
                x[nbr] += xlast[n] * G[n][nbr].get(weight, 1)
        # normalize vector (L2)
        try:
            s = 1.0 / sqrt(sum(v ** 2 for v in x.values()))
        # this should never be zero?
        except ZeroDivisionError:
            s = 1.0
        for n in x:
            x[n] *= s
        # check convergence (L1 change below n*tol)
        err = sum(abs(x[n] - xlast[n]) for n in x)
        if err < nnodes * tol:
            return x
    # BUGFIX: the original had the '%' interpolation inside the string
    # literal, so the iteration count was never substituted and stray
    # quote characters were emitted in the message.
    raise nx.NetworkXError(
        'eigenvector_centrality(): power iteration failed to converge '
        'in %d iterations.' % (i + 1))


def eigenvector_centrality_numpy(G, weight='weight'):
    r"""Compute the eigenvector centrality for the graph G.

    Eigenvector centrality computes the centrality for a node based on the
    centrality of its neighbors. The eigenvector centrality for node `i` is

    .. math::

        \mathbf{Ax} = \lambda \mathbf{x}

    where `A` is the adjacency matrix of the graph G with eigenvalue
    `\lambda`. By virtue of the Perron–Frobenius theorem, there is a unique
    and positive solution if `\lambda` is the largest eigenvalue associated
    with the eigenvector of the adjacency matrix `A` ([2]_).

    Parameters
    ----------
    G : graph
      A networkx graph

    weight : None or string, optional
      The name of the edge attribute used as weight.
      If None, all edge weights are considered equal.

    Returns
    -------
    nodes : dictionary
       Dictionary of nodes with eigenvector centrality as the value.

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> centrality = nx.eigenvector_centrality_numpy(G)
    >>> print(['%s %0.2f'%(node,centrality[node]) for node in centrality])
    ['0 0.37', '1 0.60', '2 0.60', '3 0.37']

    See Also
    --------
    eigenvector_centrality
    pagerank
    hits

    Notes
    -----
    The measure was introduced by [1]_.

    This algorithm uses the SciPy sparse eigenvalue solver (ARPACK) to
    find the largest eigenvalue/eigenvector pair.

    For directed graphs this is "left" eigenvector centrality which
    corresponds to the in-edges in the graph. For out-edges eigenvector
    centrality first reverse the graph with G.reverse().

    References
    ----------
    .. [1] Phillip Bonacich:
       Power and Centrality: A Family of Measures.
       American Journal of Sociology 92(5):1170–1182, 1986
    .. [2] Mark E. J. Newman:
       Networks: An Introduction.
       Oxford University Press, USA, 2010, pp. 169.
    """
    # numpy is used for sign/norm: the original relied on top-level
    # scipy.sign (removed from modern SciPy) and on scipy.linalg being
    # implicitly loaded as a side effect of importing scipy.sparse.linalg.
    import numpy as np
    from scipy.sparse import linalg
    if len(G) == 0:
        raise nx.NetworkXException('Empty graph.')
    M = nx.to_scipy_sparse_matrix(G, nodelist=G.nodes(), weight=weight,
                                  dtype=float)
    eigenvalue, eigenvector = linalg.eigs(M.T, k=1, which='LR')
    largest = eigenvector.flatten().real
    # Fix the overall sign so that the centralities come out positive,
    # then normalize to unit Euclidean length.
    norm = np.sign(largest.sum()) * np.linalg.norm(largest)
    centrality = dict(zip(G, map(float, largest / norm)))
    return centrality


# fixture for nose tests
def setup_module(module):
    from nose import SkipTest
    try:
        import scipy
    except ImportError:
        raise SkipTest("SciPy not available")
gpl-2.0
soulshake/readthedocs.org
readthedocs/builds/version_slug.py
34
5991
"""Contains logic for handling version slugs.

Handling slugs for versions is not too straightforward. We need to allow some
characters which are uncommon in usual slugs. They are dots and underscores.

Usually we want the slug to be the name of the tag or branch corresponding VCS
version. However we need to strip url-destroying characters like slashes.

So the syntax for version slugs should be:

* Start with a lowercase ascii char or a digit.
* All other characters must be lowercase ascii chars, digits or dots.

If uniqueness is not met for a slug in a project, we append a dash and a letter
starting with ``a``. We keep increasing that letter until we have a unique
slug. This is used since using numbers in tags is too common and appending
another number would be confusing.
"""

import math
import re
import string
from operator import truediv

from django.db import models
from django.utils.encoding import force_text


# Regex breakdown:
#   [a-z0-9] -- start with alphanumeric value
#   [-._a-z0-9] -- allow dash, dot, underscore, digit, lowercase ascii
#   *? -- allow multiple of those, but be not greedy about the matching
#   (?: ... ) -- wrap everything so that the pattern cannot escape when used in
#                regexes.
VERSION_SLUG_REGEX = '(?:[a-z0-9][-._a-z0-9]*?)'


class VersionSlugField(models.CharField):
    """
    Auto-populated, unique slug field for versions.

    Implementation inspired by ``django_extensions.db.fields.AutoSlugField``.
    The value is slugified from the model attribute named by the required
    ``populate_from`` keyword argument; on collision a ``_a``-style suffix is
    appended until the slug is unique.
    """

    # Characters never allowed in a slug (replaced by ``placeholder``).
    invalid_chars_re = re.compile('[^-._a-z0-9]')
    # Punctuation is stripped from the start of a slug.
    leading_punctuation_re = re.compile('^[-._]+')
    placeholder = '-'
    # Used when slugification leaves an empty string.
    fallback_slug = 'unknown'
    test_pattern = re.compile('^{pattern}$'.format(pattern=VERSION_SLUG_REGEX))

    def __init__(self, *args, **kwargs):
        kwargs.setdefault('db_index', True)

        populate_from = kwargs.pop('populate_from', None)
        if populate_from is None:
            raise ValueError("missing 'populate_from' argument")
        self._populate_from = populate_from
        super(VersionSlugField, self).__init__(*args, **kwargs)

    def get_queryset(self, model_cls, slug_field):
        # Find the model that actually declares ``slug_field`` so uniqueness
        # is checked against the right table in inheritance hierarchies.
        for field, model in model_cls._meta.get_fields_with_model():
            if model and field == slug_field:
                return model._default_manager.all()
        return model_cls._default_manager.all()

    def slugify(self, content):
        """Turn ``content`` into a slug matching ``VERSION_SLUG_REGEX``."""
        if not content:
            return ''

        slugified = content.lower()
        slugified = self.invalid_chars_re.sub(self.placeholder, slugified)
        slugified = self.leading_punctuation_re.sub('', slugified)

        if not slugified:
            return self.fallback_slug
        return slugified

    def uniquifying_suffix(self, iteration):
        """
        This creates a suffix based on the number given as ``iteration``. It
        will return a value encoded as lowercase ascii letter. So we have an
        alphabet of 26 letters. The returned suffix will be for example ``_yh``
        where ``yh`` is the encoding of ``iteration``. The length of it will be
        ``math.log(iteration, 26)``.

        Examples::

            uniquifying_suffix(0) == '_a'
            uniquifying_suffix(25) == '_z'
            uniquifying_suffix(26) == '_ba'
            uniquifying_suffix(52) == '_ca'
        """
        # BUGFIX: ``string.lowercase`` exists only on Python 2 (and is
        # locale-dependent there); ``string.ascii_lowercase`` is equivalent
        # for this purpose and works on Python 2 and 3.
        alphabet = string.ascii_lowercase
        length = len(alphabet)
        if iteration == 0:
            power = 0
        else:
            power = int(math.log(iteration, length))
        current = iteration
        suffix = ''
        for exp in reversed(range(0, power + 1)):
            # Integer floor division replaces ``int(truediv(...))``: it is
            # exact for arbitrarily large ints, with no float round-trip.
            digit = current // (length ** exp)
            suffix += alphabet[digit]
            current = current % length ** exp
        return '_{suffix}'.format(suffix=suffix)

    def create_slug(self, model_instance):
        """Return a slug for ``model_instance`` that is unique in its table."""
        # get fields to populate from and slug field to set
        slug_field = model_instance._meta.get_field(self.attname)

        slug = self.slugify(getattr(model_instance, self._populate_from))
        # Renamed from ``next`` to avoid shadowing the builtin.
        attempt = 0

        # strip slug depending on max_length attribute of the slug field
        # and clean-up
        slug_len = slug_field.max_length
        if slug_len:
            slug = slug[:slug_len]
        original_slug = slug

        # exclude the current model instance from the queryset used in finding
        # the next valid slug
        queryset = self.get_queryset(model_instance.__class__, slug_field)
        if model_instance.pk:
            queryset = queryset.exclude(pk=model_instance.pk)

        # form a kwarg dict used to implement any unique_together constraints
        kwargs = {}
        for params in model_instance._meta.unique_together:
            if self.attname in params:
                for param in params:
                    kwargs[param] = getattr(model_instance, param, None)

        kwargs[self.attname] = slug

        # increases the number while searching for the next valid slug
        # depending on the given slug, clean-up
        while not slug or queryset.filter(**kwargs):
            slug = original_slug
            end = self.uniquifying_suffix(attempt)
            end_len = len(end)
            if slug_len and len(slug) + end_len > slug_len:
                slug = slug[:slug_len - end_len]
            slug = slug + end
            kwargs[self.attname] = slug
            attempt += 1

        assert self.test_pattern.match(slug), (
            'Invalid generated slug: {slug}'.format(slug=slug))
        return slug

    def pre_save(self, model_instance, add):
        value = getattr(model_instance, self.attname)
        # We only create a new slug if none was set yet.
        if not value and add:
            value = force_text(self.create_slug(model_instance))
            setattr(model_instance, self.attname, value)
        return value

    def deconstruct(self):
        # Keep ``populate_from`` in migrations so the field round-trips.
        name, path, args, kwargs = super(VersionSlugField, self).deconstruct()
        kwargs['populate_from'] = self._populate_from
        return name, path, args, kwargs
mit
icgc/icgc-get
tests/test_ega.py
1
1474
#
# Copyright (c) 2016 The Ontario Institute for Cancer Research. All rights reserved.
#
# This program and the accompanying materials are made available under the terms of the GNU Public License v3.0.
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
# SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from conftest import download_test

# Both EGA scenarios are expected to yield the same single file on disk.
EXPECTED_NAMES = ['_methylationCEL_CLL-174.CEL']
EXPECTED_SIZES = [5556766]


def test_ega_file(data_dir):
    """Exercise the EGA repository with a single file id in 'status' mode."""
    download_test(['FI98765'], 'status', 'ega',
                  EXPECTED_NAMES, EXPECTED_SIZES, data_dir)


def test_ega_manifest(data_dir):
    """Exercise the EGA repository with a manifest uuid in 'download' mode."""
    download_test(['4294ed2b-4d41-4967-8c5d-231027fa40c7'], 'download', 'ega',
                  EXPECTED_NAMES, EXPECTED_SIZES, data_dir)
gpl-3.0
timothycrosley/webelements_site
DynamicForm/AppEngine.py
4
1742
"""
    Defines an AppEngine compatible version of DynamicForm
"""

import webapp2

from . import HTTP, PageControls
from .DynamicForm import DynamicForm as BaseDynamicForm


class DynamicForm(webapp2.RequestHandler, BaseDynamicForm):
    """
        Overrides handler methods of the DynamicForm class to enable it to run
        seamlessly on AppEngine
    """

    def __init__(self, request=None, response=None):
        webapp2.RequestHandler.__init__(self, request, response)
        BaseDynamicForm.__init__(self)

    def _bridge(self, method):
        """
            Shared request bridge (previously duplicated in every verb
            handler): converts the AppEngine request into an HTTP.Request
            tagged with the verb, runs the form's request handler, then
            writes the result back onto the AppEngine response.

            method - the HTTP verb being serviced (e.g. "GET")
        """
        request = HTTP.Request.fromAppEngineRequest(self.request, method)
        return self.handleRequest(request).toAppEngineResponse(self.response)

    def get(self):
        return self._bridge("GET")

    def post(self):
        return self._bridge("POST")

    def put(self):
        return self._bridge("PUT")

    def head(self):
        return self._bridge("HEAD")

    def options(self):
        return self._bridge("OPTIONS")

    def delete(self):
        return self._bridge("DELETE")

    def trace(self):
        return self._bridge("TRACE")
mit
mistercrunch/panoramix
tests/superset_test_config_thumbnails.py
3
2665
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
# type: ignore

# Superset configuration overlay used by the thumbnail tests: start from the
# full default config (star import below) and override only what the tests need.
from copy import copy

from superset.config import *

AUTH_USER_REGISTRATION_ROLE = "alpha"
SQLALCHEMY_DATABASE_URI = "sqlite:///" + os.path.join(DATA_DIR, "unittests.db")
DEBUG = True
SUPERSET_WEBSERVER_PORT = 8081

# Allowing SQLALCHEMY_DATABASE_URI to be defined as an env var for
# continuous integration
if "SUPERSET__SQLALCHEMY_DATABASE_URI" in os.environ:
    SQLALCHEMY_DATABASE_URI = os.environ["SUPERSET__SQLALCHEMY_DATABASE_URI"]

if "sqlite" in SQLALCHEMY_DATABASE_URI:
    logger.warning(
        "SQLite Database support for metadata databases will be removed \
        in a future version of Superset."
    )

SQL_SELECT_AS_CTA = True
SQL_MAX_ROW = 666


def GET_FEATURE_FLAGS_FUNC(ff):
    # Feature-flag post-processing hook: returns a copy of the incoming flag
    # dict with an extra "super": "set" entry (the input dict is not mutated).
    ff_copy = copy(ff)
    ff_copy["super"] = "set"
    return ff_copy


TESTING = True
WTF_CSRF_ENABLED = False

PUBLIC_ROLE_LIKE = "Gamma"
AUTH_ROLE_PUBLIC = "Public"
EMAIL_NOTIFICATIONS = False

CACHE_CONFIG = {"CACHE_TYPE": "simple"}

# Redis connection settings; overridable from the environment for CI.
REDIS_HOST = os.environ.get("REDIS_HOST", "localhost")
REDIS_PORT = os.environ.get("REDIS_PORT", "6379")
REDIS_CELERY_DB = os.environ.get("REDIS_CELERY_DB", 2)
REDIS_RESULTS_DB = os.environ.get("REDIS_RESULTS_DB", 3)


class CeleryConfig(object):
    # Celery worker settings; imports include the thumbnail task module so
    # async thumbnail generation can run under this config.
    BROKER_URL = f"redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_CELERY_DB}"
    CELERY_IMPORTS = ("superset.sql_lab", "superset.tasks.thumbnails")
    CELERY_ANNOTATIONS = {"sql_lab.add": {"rate_limit": "10/s"}}
    CONCURRENCY = 1


CELERY_CONFIG = CeleryConfig

FEATURE_FLAGS = {
    "foo": "bar",
    "KV_STORE": False,
    "SHARE_QUERIES_VIA_KV_STORE": False,
    "THUMBNAILS": True,
    "THUMBNAILS_SQLA_LISTENERS": False,
}

THUMBNAIL_CACHE_CONFIG = {
    "CACHE_TYPE": "redis",
    "CACHE_DEFAULT_TIMEOUT": 10000,
    "CACHE_KEY_PREFIX": "superset_thumbnails_",
    "CACHE_REDIS_HOST": REDIS_HOST,
    "CACHE_REDIS_PORT": REDIS_PORT,
    "CACHE_REDIS_DB": REDIS_CELERY_DB,
}
apache-2.0
saurabh6790/test-erp
erpnext/patches/v4_0/update_custom_print_formats_for_renamed_fields.py
119
1265
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe import re def execute(): # NOTE: sequence is important fields_list = ( ("amount", "base_amount"), ("ref_rate", "price_list_rate"), ("base_ref_rate", "base_price_list_rate"), ("adj_rate", "discount_percentage"), ("export_rate", "rate"), ("basic_rate", "base_rate"), ("export_amount", "amount"), ("reserved_warehouse", "warehouse"), ("import_ref_rate", "price_list_rate"), ("purchase_ref_rate", "base_price_list_rate"), ("discount_rate", "discount_percentage"), ("import_rate", "rate"), ("purchase_rate", "base_rate"), ("import_amount", "amount") ) condition = " or ".join("""html like "%%{}%%" """.format(d[0].replace("_", "\\_")) for d in fields_list if d[0] != "amount") for name, html in frappe.db.sql("""select name, html from `tabPrint Format` where standard = 'No' and ({}) and html not like '%%frappe.%%'""".format(condition)): html = html.replace("wn.", "frappe.") for from_field, to_field in fields_list: html = re.sub(r"\b{}\b".format(from_field), to_field, html) frappe.db.set_value("Print Format", name, "html", html)
agpl-3.0
AutorestCI/azure-sdk-for-python
azure-mgmt-resource/azure/mgmt/resource/resources/v2016_02_01/models/provider_resource_type.py
2
1829
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class ProviderResourceType(Model): """Resource type managed by the resource provider. :param resource_type: The resource type. :type resource_type: str :param locations: The collection of locations where this resource type can be created in. :type locations: list[str] :param aliases: The aliases that are supported by this resource type. :type aliases: list[~azure.mgmt.resource.resources.v2016_02_01.models.AliasType] :param api_versions: The api version. :type api_versions: list[str] :param properties: The properties. :type properties: dict[str, str] """ _attribute_map = { 'resource_type': {'key': 'resourceType', 'type': 'str'}, 'locations': {'key': 'locations', 'type': '[str]'}, 'aliases': {'key': 'aliases', 'type': '[AliasType]'}, 'api_versions': {'key': 'apiVersions', 'type': '[str]'}, 'properties': {'key': 'properties', 'type': '{str}'}, } def __init__(self, resource_type=None, locations=None, aliases=None, api_versions=None, properties=None): super(ProviderResourceType, self).__init__() self.resource_type = resource_type self.locations = locations self.aliases = aliases self.api_versions = api_versions self.properties = properties
mit
vighneshbirodkar/scikit-image
skimage/util/tests/test_apply_parallel.py
8
1976
from __future__ import absolute_import

import numpy as np
from numpy.testing import assert_array_almost_equal
# NOTE(review): numpy.testing.decorators was removed in newer NumPy releases;
# this import works only on the NumPy versions contemporary with this file.
from numpy.testing.decorators import skipif

from skimage.filters import threshold_adaptive, gaussian
from skimage.util.apply_parallel import apply_parallel, dask_available


# Every test is skipped when dask is unavailable, since apply_parallel
# depends on it for chunked execution.
@skipif(not dask_available)
def test_apply_parallel():
    # data
    a = np.arange(144).reshape(12, 12).astype(float)

    # apply the filter
    expected1 = threshold_adaptive(a, 3)
    # depth=5 gives each chunk enough overlap for the filter footprint,
    # so the chunked result should match the unchunked one exactly.
    result1 = apply_parallel(threshold_adaptive, a, chunks=(6, 6), depth=5,
                             extra_arguments=(3,),
                             extra_keywords={'mode': 'reflect'})

    assert_array_almost_equal(result1, expected1)

    def wrapped_gauss(arr):
        return gaussian(arr, 1, mode='reflect')

    expected2 = gaussian(a, 1, mode='reflect')
    result2 = apply_parallel(wrapped_gauss, a, chunks=(6, 6), depth=5)

    assert_array_almost_equal(result2, expected2)


@skipif(not dask_available)
def test_no_chunks():
    # With no chunks argument, apply_parallel must still process the whole
    # (4-D) array and return the element-wise result unchanged.
    a = np.ones(1 * 4 * 8 * 9).reshape(1, 4, 8, 9)

    def add_42(arr):
        return arr + 42

    expected = add_42(a)
    result = apply_parallel(add_42, a)

    assert_array_almost_equal(result, expected)


@skipif(not dask_available)
def test_apply_parallel_wrap():
    # 'wrap' boundary mode, passed through apply_parallel's mode argument.
    def wrapped(arr):
        return gaussian(arr, 1, mode='wrap')
    a = np.arange(144).reshape(12, 12).astype(float)
    expected = gaussian(a, 1, mode='wrap')
    result = apply_parallel(wrapped, a, chunks=(6, 6), depth=5, mode='wrap')

    assert_array_almost_equal(result, expected)


@skipif(not dask_available)
def test_apply_parallel_nearest():
    # 'nearest' boundary mode, with a per-axis depth dict instead of a scalar.
    def wrapped(arr):
        return gaussian(arr, 1, mode='nearest')
    a = np.arange(144).reshape(12, 12).astype(float)
    expected = gaussian(a, 1, mode='nearest')
    result = apply_parallel(wrapped, a, chunks=(6, 6), depth={0: 5, 1: 5},
                            mode='nearest')

    assert_array_almost_equal(result, expected)
bsd-3-clause
yashodhank/frappe
frappe/desk/calendar.py
10
1155
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt from __future__ import unicode_literals import frappe from frappe import _ import json @frappe.whitelist() def update_event(args, field_map): """Updates Event (called via calendar) based on passed `field_map`""" args = frappe._dict(json.loads(args)) field_map = frappe._dict(json.loads(field_map)) w = frappe.get_doc(args.doctype, args.name) w.set(field_map.start, args[field_map.start]) w.set(field_map.end, args.get(field_map.end)) w.save() def get_event_conditions(doctype, filters=None): """Returns SQL conditions with user permissions and filters for event queries""" from frappe.desk.reportview import build_match_conditions if not frappe.has_permission(doctype): frappe.throw(_("Not Permitted"), frappe.PermissionError) conditions = build_match_conditions(doctype) conditions = conditions and (" and " + conditions) or "" if filters: filters = json.loads(filters) for key in filters: if filters[key]: conditions += 'and `{0}` = "{1}"'.format(frappe.db.escape(key), frappe.db.escape(filters[key])) return conditions
mit
sikmir/QGIS
python/PyQt/PyQt5/QtWidgets.py
45
1111
# -*- coding: utf-8 -*-

"""
***************************************************************************
    QtWidgets.py
    ---------------------
    Date                 : November 2015
    Copyright            : (C) 2015 by Matthias Kuhn
    Email                : matthias at opengis dot ch
***************************************************************************
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
***************************************************************************
"""

__author__ = 'Matthias Kuhn'
__date__ = 'November 2015'
__copyright__ = '(C) 2015, Matthias Kuhn'

from PyQt5.QtWidgets import *


def _set_margin(self, margin):
    """Qt4-style convenience: apply one margin value to all four sides."""
    self.setContentsMargins(margin, margin, margin, margin)


# Provide a Qt4-compatible QLayout.setMargin() on top of PyQt5.
QLayout.setMargin = _set_margin
gpl-2.0
spring-week-topos/nova-week
nova/tests/compute/test_compute_cells.py
15
8527
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Compute w/ Cells
"""
import functools

import mock
from oslo.config import cfg

from nova.cells import manager
from nova.compute import api as compute_api
from nova.compute import cells_api as compute_cells_api
from nova import db
from nova.openstack.common import jsonutils
from nova import quota
from nova.tests.compute import test_compute


# Set in setUp() to the plain (non-cells) compute API, so the stubs below can
# forward cells calls/casts to the original implementation.
ORIG_COMPUTE_API = None

cfg.CONF.import_opt('enable', 'nova.cells.opts', group='cells')


def stub_call_to_cells(context, instance, method, *args, **kwargs):
    """Replacement for ComputeCellsAPI._call_to_cells: runs the original
    compute API method directly and returns its result."""
    fn = getattr(ORIG_COMPUTE_API, method)
    original_instance = kwargs.pop('original_instance', None)
    if original_instance:
        instance = original_instance
        # Restore this in 'child cell DB'
        db.instance_update(context, instance['uuid'],
                dict(vm_state=instance['vm_state'],
                     task_state=instance['task_state']))

    # Use NoopQuotaDriver in child cells.
    saved_quotas = quota.QUOTAS
    quota.QUOTAS = quota.QuotaEngine(
            quota_driver_class=quota.NoopQuotaDriver())
    compute_api.QUOTAS = quota.QUOTAS
    try:
        return fn(context, instance, *args, **kwargs)
    finally:
        # Always restore the real quota engine, even if the call raised.
        quota.QUOTAS = saved_quotas
        compute_api.QUOTAS = saved_quotas


def stub_cast_to_cells(context, instance, method, *args, **kwargs):
    """Replacement for ComputeCellsAPI._cast_to_cells: like the call stub
    above but fire-and-forget (the result is discarded)."""
    fn = getattr(ORIG_COMPUTE_API, method)
    original_instance = kwargs.pop('original_instance', None)
    if original_instance:
        instance = original_instance
        # Restore this in 'child cell DB'
        db.instance_update(context, instance['uuid'],
                dict(vm_state=instance['vm_state'],
                     task_state=instance['task_state']))

    # Use NoopQuotaDriver in child cells.
    saved_quotas = quota.QUOTAS
    quota.QUOTAS = quota.QuotaEngine(
            quota_driver_class=quota.NoopQuotaDriver())
    compute_api.QUOTAS = quota.QUOTAS
    try:
        fn(context, instance, *args, **kwargs)
    finally:
        quota.QUOTAS = saved_quotas
        compute_api.QUOTAS = saved_quotas


def deploy_stubs(stubs, api, original_instance=None):
    """Install the call/cast stubs onto ``api``, optionally binding a fixed
    ``original_instance`` into both of them."""
    call = stub_call_to_cells
    cast = stub_cast_to_cells

    if original_instance:
        kwargs = dict(original_instance=original_instance)
        call = functools.partial(stub_call_to_cells, **kwargs)
        cast = functools.partial(stub_cast_to_cells, **kwargs)

    stubs.Set(api, '_call_to_cells', call)
    stubs.Set(api, '_cast_to_cells', cast)


def wrap_create_instance(func):
    """Decorator: pin instance creation to one fake instance and re-deploy
    the stubs bound to its primitive form before running ``func``."""
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        instance = self._create_fake_instance()

        def fake(*args, **kwargs):
            return instance

        self.stubs.Set(self, '_create_fake_instance', fake)
        original_instance = jsonutils.to_primitive(instance)
        deploy_stubs(self.stubs, self.compute_api,
                     original_instance=original_instance)
        return func(self, *args, **kwargs)

    return wrapper


class CellsComputeAPITestCase(test_compute.ComputeAPITestCase):
    # Re-runs the whole ComputeAPITestCase suite against ComputeCellsAPI,
    # with the cells call/cast paths short-circuited by the stubs above.

    def setUp(self):
        super(CellsComputeAPITestCase, self).setUp()
        global ORIG_COMPUTE_API
        ORIG_COMPUTE_API = self.compute_api
        self.flags(enable=True, group='cells')

        def _fake_cell_read_only(*args, **kwargs):
            return False

        def _fake_validate_cell(*args, **kwargs):
            return

        def _nop_update(context, instance, **kwargs):
            return instance

        self.compute_api = compute_cells_api.ComputeCellsAPI()
        self.stubs.Set(self.compute_api, '_cell_read_only',
                _fake_cell_read_only)
        self.stubs.Set(self.compute_api, '_validate_cell',
                _fake_validate_cell)

        # NOTE(belliott) Don't update the instance state
        # for the tests at the API layer.  Let it happen after
        # the stub cast to cells so that expected_task_states
        # match.
        self.stubs.Set(self.compute_api, 'update', _nop_update)

        deploy_stubs(self.stubs, self.compute_api)

    def tearDown(self):
        global ORIG_COMPUTE_API
        self.compute_api = ORIG_COMPUTE_API
        super(CellsComputeAPITestCase, self).tearDown()

    def test_instance_metadata(self):
        self.skipTest("Test is incompatible with cells.")

    def test_evacuate(self):
        self.skipTest("Test is incompatible with cells.")

    def test_delete_instance_no_cell(self):
        cells_rpcapi = self.compute_api.cells_rpcapi
        self.mox.StubOutWithMock(cells_rpcapi,
                                 'instance_delete_everywhere')
        inst = self._create_fake_instance_obj()
        cells_rpcapi.instance_delete_everywhere(self.context,
                inst, 'hard')
        self.mox.ReplayAll()
        self.stubs.Set(self.compute_api.network_api,
                       'deallocate_for_instance',
                       lambda *a, **kw: None)
        self.compute_api.delete(self.context, inst)

    def test_soft_delete_instance_no_cell(self):
        cells_rpcapi = self.compute_api.cells_rpcapi
        self.mox.StubOutWithMock(cells_rpcapi,
                                 'instance_delete_everywhere')
        inst = self._create_fake_instance_obj()
        cells_rpcapi.instance_delete_everywhere(self.context,
                inst, 'soft')
        self.mox.ReplayAll()
        self.stubs.Set(self.compute_api.network_api,
                       'deallocate_for_instance',
                       lambda *a, **kw: None)
        self.compute_api.soft_delete(self.context, inst)

    def test_get_migrations(self):
        filters = {'cell_name': 'ChildCell', 'status': 'confirmed'}
        migrations = {'migrations': [{'id': 1234}]}
        cells_rpcapi = self.compute_api.cells_rpcapi
        self.mox.StubOutWithMock(cells_rpcapi, 'get_migrations')
        cells_rpcapi.get_migrations(self.context,
                                        filters).AndReturn(migrations)
        self.mox.ReplayAll()

        response = self.compute_api.get_migrations(self.context, filters)

        self.assertEqual(migrations, response)

    @mock.patch('nova.cells.messaging._TargetedMessage')
    def test_rebuild_sig(self, mock_msg):
        # TODO(belliott) Cells could benefit from better testing to ensure API
        # and manager signatures stay up to date

        def wire(version):
            # wire the rpc cast directly to the manager method to make sure
            # the signature matches
            cells_mgr = manager.CellsManager()

            def cast(context, method, *args, **kwargs):
                fn = getattr(cells_mgr, method)
                fn(context, *args, **kwargs)

            cells_mgr.cast = cast
            return cells_mgr

        cells_rpcapi = self.compute_api.cells_rpcapi
        client = cells_rpcapi.client

        with mock.patch.object(client, 'prepare', side_effect=wire):
            inst = self._create_fake_instance_obj()
            inst.cell_name = 'mycell'

            cells_rpcapi.rebuild_instance(self.context, inst, 'pass', None,
                                          None, None, None, None,
                                          recreate=False,
                                          on_shared_storage=False,
                                          host='host',
                                          preserve_ephemeral=True,
                                          kwargs=None)

        # one targeted message should have been created
        self.assertEqual(1, mock_msg.call_count)


class CellsComputePolicyTestCase(test_compute.ComputePolicyTestCase):
    # Same idea as above, for the policy test suite.

    def setUp(self):
        super(CellsComputePolicyTestCase, self).setUp()
        global ORIG_COMPUTE_API
        ORIG_COMPUTE_API = self.compute_api
        self.compute_api = compute_cells_api.ComputeCellsAPI()
        deploy_stubs(self.stubs, self.compute_api)

    def tearDown(self):
        global ORIG_COMPUTE_API
        self.compute_api = ORIG_COMPUTE_API
        super(CellsComputePolicyTestCase, self).tearDown()
apache-2.0
jbenden/ansible
lib/ansible/modules/network/system/net_system.py
8
3065
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'core'}

DOCUMENTATION = """
---
module: net_system
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage the system attributes on network devices
description:
  - This module provides declarative management of node system attributes
    on network devices.  It provides an option to configure host system
    parameters or remove those parameters from the device active
    configuration.
options:
  hostname:
    description:
      - Configure the device hostname parameter. This option takes an
        ASCII string value.
  domain_name:
    description:
      - Configure the IP domain name on the remote device to the provided
        value. Value should be in the dotted name form and will be appended
        to the C(hostname) to create a fully-qualified domain name.
  domain_search:
    description:
      - Provides the list of domain suffixes to append to the hostname for
        the purpose of doing name resolution. This argument accepts a list
        of names and will be reconciled with the current active
        configuration on the running node.
  lookup_source:
    description:
      - Provides one or more source interfaces to use for performing DNS
        lookups. The interface provided in C(lookup_source) must be a valid
        interface configured on the device.
  name_servers:
    description:
      - List of DNS name servers by IP address to use to perform name
        resolution lookups. This argument accepts either a list of DNS
        servers See examples.
  state:
    description:
      - State of the configuration values in the device's current active
        configuration. When set to I(present), the values should be
        configured in the device active configuration and when set to
        I(absent) the values should not be in the device active
        configuration
    default: present
    choices: ['present', 'absent']
"""

# BUGFIX: the first example used "domain-search", which does not match the
# documented/implemented option name "domain_search" and would fail if
# copy-pasted by a user.
EXAMPLES = """
- name: configure hostname and domain name
  net_system:
    hostname: ios01
    domain_name: test.example.com
    domain_search:
      - ansible.com
      - redhat.com
      - cisco.com

- name: remove configuration
  net_system:
    state: absent

- name: configure DNS lookup sources
  net_system:
    lookup_source: MgmtEth0/0/CPU0/0

- name: configure name servers
  net_system:
    name_servers:
      - 8.8.8.8
      - 8.8.4.4
"""

RETURN = """
commands:
  description: The list of configuration mode commands to send to the device
  returned: always, except for the platforms that use Netconf transport to manage the device.
  type: list
  sample:
    - hostname ios01
    - ip domain name test.example.com
"""
gpl-3.0
roderickvd/nzbToMedia
libs/unidecode/x0a4.py
252
4437
data = ( 'qiet', # 0x00 'qiex', # 0x01 'qie', # 0x02 'qiep', # 0x03 'quot', # 0x04 'quox', # 0x05 'quo', # 0x06 'quop', # 0x07 'qot', # 0x08 'qox', # 0x09 'qo', # 0x0a 'qop', # 0x0b 'qut', # 0x0c 'qux', # 0x0d 'qu', # 0x0e 'qup', # 0x0f 'qurx', # 0x10 'qur', # 0x11 'qyt', # 0x12 'qyx', # 0x13 'qy', # 0x14 'qyp', # 0x15 'qyrx', # 0x16 'qyr', # 0x17 'jjit', # 0x18 'jjix', # 0x19 'jji', # 0x1a 'jjip', # 0x1b 'jjiet', # 0x1c 'jjiex', # 0x1d 'jjie', # 0x1e 'jjiep', # 0x1f 'jjuox', # 0x20 'jjuo', # 0x21 'jjuop', # 0x22 'jjot', # 0x23 'jjox', # 0x24 'jjo', # 0x25 'jjop', # 0x26 'jjut', # 0x27 'jjux', # 0x28 'jju', # 0x29 'jjup', # 0x2a 'jjurx', # 0x2b 'jjur', # 0x2c 'jjyt', # 0x2d 'jjyx', # 0x2e 'jjy', # 0x2f 'jjyp', # 0x30 'njit', # 0x31 'njix', # 0x32 'nji', # 0x33 'njip', # 0x34 'njiet', # 0x35 'njiex', # 0x36 'njie', # 0x37 'njiep', # 0x38 'njuox', # 0x39 'njuo', # 0x3a 'njot', # 0x3b 'njox', # 0x3c 'njo', # 0x3d 'njop', # 0x3e 'njux', # 0x3f 'nju', # 0x40 'njup', # 0x41 'njurx', # 0x42 'njur', # 0x43 'njyt', # 0x44 'njyx', # 0x45 'njy', # 0x46 'njyp', # 0x47 'njyrx', # 0x48 'njyr', # 0x49 'nyit', # 0x4a 'nyix', # 0x4b 'nyi', # 0x4c 'nyip', # 0x4d 'nyiet', # 0x4e 'nyiex', # 0x4f 'nyie', # 0x50 'nyiep', # 0x51 'nyuox', # 0x52 'nyuo', # 0x53 'nyuop', # 0x54 'nyot', # 0x55 'nyox', # 0x56 'nyo', # 0x57 'nyop', # 0x58 'nyut', # 0x59 'nyux', # 0x5a 'nyu', # 0x5b 'nyup', # 0x5c 'xit', # 0x5d 'xix', # 0x5e 'xi', # 0x5f 'xip', # 0x60 'xiet', # 0x61 'xiex', # 0x62 'xie', # 0x63 'xiep', # 0x64 'xuox', # 0x65 'xuo', # 0x66 'xot', # 0x67 'xox', # 0x68 'xo', # 0x69 'xop', # 0x6a 'xyt', # 0x6b 'xyx', # 0x6c 'xy', # 0x6d 'xyp', # 0x6e 'xyrx', # 0x6f 'xyr', # 0x70 'yit', # 0x71 'yix', # 0x72 'yi', # 0x73 'yip', # 0x74 'yiet', # 0x75 'yiex', # 0x76 'yie', # 0x77 'yiep', # 0x78 'yuot', # 0x79 'yuox', # 0x7a 'yuo', # 0x7b 'yuop', # 0x7c 'yot', # 0x7d 'yox', # 0x7e 'yo', # 0x7f 'yop', # 0x80 'yut', # 0x81 'yux', # 0x82 'yu', # 0x83 'yup', # 0x84 'yurx', # 0x85 'yur', # 0x86 'yyt', # 0x87 
'yyx', # 0x88 'yy', # 0x89 'yyp', # 0x8a 'yyrx', # 0x8b 'yyr', # 0x8c '[?]', # 0x8d '[?]', # 0x8e '[?]', # 0x8f 'Qot', # 0x90 'Li', # 0x91 'Kit', # 0x92 'Nyip', # 0x93 'Cyp', # 0x94 'Ssi', # 0x95 'Ggop', # 0x96 'Gep', # 0x97 'Mi', # 0x98 'Hxit', # 0x99 'Lyr', # 0x9a 'Bbut', # 0x9b 'Mop', # 0x9c 'Yo', # 0x9d 'Put', # 0x9e 'Hxuo', # 0x9f 'Tat', # 0xa0 'Ga', # 0xa1 '[?]', # 0xa2 '[?]', # 0xa3 'Ddur', # 0xa4 'Bur', # 0xa5 'Gguo', # 0xa6 'Nyop', # 0xa7 'Tu', # 0xa8 'Op', # 0xa9 'Jjut', # 0xaa 'Zot', # 0xab 'Pyt', # 0xac 'Hmo', # 0xad 'Yit', # 0xae 'Vur', # 0xaf 'Shy', # 0xb0 'Vep', # 0xb1 'Za', # 0xb2 'Jo', # 0xb3 '[?]', # 0xb4 'Jjy', # 0xb5 'Got', # 0xb6 'Jjie', # 0xb7 'Wo', # 0xb8 'Du', # 0xb9 'Shur', # 0xba 'Lie', # 0xbb 'Cy', # 0xbc 'Cuop', # 0xbd 'Cip', # 0xbe 'Hxop', # 0xbf 'Shat', # 0xc0 '[?]', # 0xc1 'Shop', # 0xc2 'Che', # 0xc3 'Zziet', # 0xc4 '[?]', # 0xc5 'Ke', # 0xc6 '[?]', # 0xc7 '[?]', # 0xc8 '[?]', # 0xc9 '[?]', # 0xca '[?]', # 0xcb '[?]', # 0xcc '[?]', # 0xcd '[?]', # 0xce '[?]', # 0xcf '[?]', # 0xd0 '[?]', # 0xd1 '[?]', # 0xd2 '[?]', # 0xd3 '[?]', # 0xd4 '[?]', # 0xd5 '[?]', # 0xd6 '[?]', # 0xd7 '[?]', # 0xd8 '[?]', # 0xd9 '[?]', # 0xda '[?]', # 0xdb '[?]', # 0xdc '[?]', # 0xdd '[?]', # 0xde '[?]', # 0xdf '[?]', # 0xe0 '[?]', # 0xe1 '[?]', # 0xe2 '[?]', # 0xe3 '[?]', # 0xe4 '[?]', # 0xe5 '[?]', # 0xe6 '[?]', # 0xe7 '[?]', # 0xe8 '[?]', # 0xe9 '[?]', # 0xea '[?]', # 0xeb '[?]', # 0xec '[?]', # 0xed '[?]', # 0xee '[?]', # 0xef '[?]', # 0xf0 '[?]', # 0xf1 '[?]', # 0xf2 '[?]', # 0xf3 '[?]', # 0xf4 '[?]', # 0xf5 '[?]', # 0xf6 '[?]', # 0xf7 '[?]', # 0xf8 '[?]', # 0xf9 '[?]', # 0xfa '[?]', # 0xfb '[?]', # 0xfc '[?]', # 0xfd '[?]', # 0xfe )
gpl-3.0
romain-li/edx-platform
lms/djangoapps/commerce/migrations/0002_commerceconfiguration.py
59
1477
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('commerce', '0001_data__add_ecommerce_service_user'), ] operations = [ migrations.CreateModel( name='CommerceConfiguration', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')), ('enabled', models.BooleanField(default=False, verbose_name='Enabled')), ('checkout_on_ecommerce_service', models.BooleanField(default=False, help_text='Use the checkout page hosted by the E-Commerce service.')), ('single_course_checkout_page', models.CharField(default=b'/basket/single-item/', help_text='Path to single course checkout page hosted by the E-Commerce service.', max_length=255)), ('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')), ], options={ 'ordering': ('-change_date',), 'abstract': False, }, ), ]
agpl-3.0
mcanthony/rethinkdb
test/interface/stat.py
3
14876
#!/usr/bin/env python # Copyright 2010-2015 RethinkDB, all rights reserved. # This file tests the `rethinkdb.stats` admin table. # The scenario works by starting a cluster of two servers and two tables. The tables are # then sharded across the two servers (no replicas), and populated with 100 rows. # A small read/write workload runs in the background during the entire test to ensure # that we have stats to read. In addition, we run with a cache-size of zero to force # disk reads and writes. # # 1. Cluster is started, table populated # 2. Gather and verify stats # 3. Shut down the second server # 4. Gather and verify stats - observe timeouts for the missing server # 5. Restart the second server # 6. Gather and verify stats # # Stats verification is rather weak because we can't expect specific values for many # fields. For most of them, we simple assert that they are greater than zero. In # addition, the full scan of the `stats` table in verified for internal consistency. # That is, we make sure the tables' and servers' stats add up to the cluster stats, # and so on. This is not valid when getting rows from the stats table individually, # as there will be race conditions then. 
import multiprocessing, os, pprint, random, re, sys, time sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common'))) import driver, scenario_common, utils, vcoptparse, workload_runner r = utils.import_python_driver() db = 'test' server_names = ['nate', 'grey'] table_names = ['foo', 'bar'] def read_write_workload(port, table, stop_event): conn = r.connect("localhost", port) ids = list(r.range(100).map(lambda x: r.uuid()).run(conn)) r.db(db).table(table).insert([{'id': i, 'value': 0} for i in ids]).run(conn) # Increment this every loop so the update actually results in a write counter = 0 while not stop_event.is_set(): counter += 1 try: r.db(db).table(table).get(random.choice(ids)).run(conn) r.db(db).table(table).insert({'id':random.choice(ids), 'value': counter}, conflict='replace').run(conn) time.sleep(0.05) except r.ReqlRuntimeError: # Ignore runtime errors and keep going until stopped pass # Per-second values are floats, so do a fuzzy comparison to allow for accumulated error def fuzzy_compare(left, right): return (left - right) < 1e-03 def find_rows(global_stats, pred): res = [ ] for row in global_stats: if pred(row['id']): res.append(row) assert len(res) != 0, "Missing stats row" return res def check_sum_stat(path, iterable, expected): def walk_object(path, o): for p in path: o = o[p] return o total = 0.0 for item in iterable: # Don't count the row if it errored - the stats are missing anyway if 'error' not in item: total += walk_object(path, item) if 'error' not in expected: assert fuzzy_compare(total, walk_object(path, expected)), \ "Stats (%s) did not add up, expected %f, got %f" % (repr(path), total, walk_object(expected)) # Verifies that the table_server stats add up to the table stats def check_table_stats(table_id, global_stats): table_row = find_rows(global_stats, lambda row_id: row_id == ['table', table_id]) assert len(table_row) == 1 table_row = table_row[0] table_server_rows = find_rows(global_stats, lambda 
row_id: len(row_id) == 3 and \ row_id[0] == 'table_server' and \ row_id[1] == table_id) check_sum_stat(['query_engine', 'read_docs_per_sec'], table_server_rows, table_row) check_sum_stat(['query_engine', 'written_docs_per_sec'], table_server_rows, table_row) # Verifies that the table_server stats add up to the server stats def check_server_stats(server_id, global_stats): server_row = find_rows(global_stats, lambda row_id: row_id == ['server', server_id]) assert len(server_row) == 1 server_row = server_row[0] table_server_rows = find_rows(global_stats, lambda row_id: len(row_id) == 3 and \ row_id[0] == 'table_server' and \ row_id[2] == server_id) check_sum_stat(['query_engine', 'read_docs_per_sec'], table_server_rows, server_row) check_sum_stat(['query_engine', 'written_docs_per_sec'], table_server_rows, server_row) check_sum_stat(['query_engine', 'read_docs_total'], table_server_rows, server_row) check_sum_stat(['query_engine', 'written_docs_total'], table_server_rows, server_row) # Verifies that table and server stats add up to the cluster stats def check_cluster_stats(global_stats): cluster_row = find_rows(global_stats, lambda row_id: row_id == ['cluster']) assert len(cluster_row) == 1 cluster_row = cluster_row[0] table_rows = find_rows(global_stats, lambda row_id: len(row_id) == 2 and row_id[0] == 'table') check_sum_stat(['query_engine', 'read_docs_per_sec'], table_rows, cluster_row) check_sum_stat(['query_engine', 'written_docs_per_sec'], table_rows, cluster_row) server_rows = find_rows(global_stats, lambda row_id: len(row_id) == 2 and row_id[0] == 'server') check_sum_stat(['query_engine', 'read_docs_per_sec'], server_rows, cluster_row) check_sum_stat(['query_engine', 'written_docs_per_sec'], server_rows, cluster_row) check_sum_stat(['query_engine', 'client_connections'], server_rows, cluster_row) check_sum_stat(['query_engine', 'clients_active'], server_rows, cluster_row) def get_and_check_global_stats(tables, servers, conn): global_stats = 
list(r.db('rethinkdb').table('stats').run(conn)) check_cluster_stats(global_stats) for table in tables: check_table_stats(table['id'], global_stats) numServers = 0 for server in servers: numServers += 1 check_server_stats(server.uuid, global_stats) assert len(global_stats) == 1 + len(tables) + numServers + (len(tables) * numServers) return global_stats def get_individual_stats(global_stats, conn): res = [ ] for row in global_stats: rerow = r.db('rethinkdb').table('stats').get(row['id']).run(conn) assert isinstance(rerow, dict) assert rerow['id'] == row['id'] res.append(rerow) return res # Global and individual stats should be in the same order # This also assumes that the individual stats were collected after the global stats # The only thing we know about `per_sec` stats is that they are non-zero # For `total` stats, we can check that they only increase with time def compare_global_and_individual_stats(global_stats, individual_stats): assert len(global_stats) == len(individual_stats) for i in xrange(len(global_stats)): a = global_stats[i] b = individual_stats[i] assert a['id'] == b['id'] if a['id'][0] == 'cluster': assert a['query_engine']['queries_per_sec'] > 0 assert b['query_engine']['queries_per_sec'] > 0 assert a['query_engine']['read_docs_per_sec'] > 0 assert b['query_engine']['read_docs_per_sec'] > 0 assert a['query_engine']['written_docs_per_sec'] > 0 assert b['query_engine']['written_docs_per_sec'] > 0 assert a['query_engine']['client_connections'] == b['query_engine']['client_connections'] == len(table_names) + 1 elif a['id'][0] == 'server': assert a['server'] == b['server'] assert a['query_engine']['queries_per_sec'] >= 0 assert b['query_engine']['queries_per_sec'] >= 0 assert a['query_engine']['read_docs_per_sec'] > 0 assert b['query_engine']['read_docs_per_sec'] > 0 assert a['query_engine']['written_docs_per_sec'] > 0 assert b['query_engine']['written_docs_per_sec'] > 0 assert a['query_engine']['queries_total'] <= b['query_engine']['queries_total'] 
assert a['query_engine']['read_docs_total'] <= b['query_engine']['read_docs_total'] assert a['query_engine']['written_docs_total'] <= b['query_engine']['written_docs_total'] elif a['id'][0] == 'table': assert a['db'] == b['db'] assert a['table'] == b['table'] assert a['query_engine']['read_docs_per_sec'] > 0 assert b['query_engine']['read_docs_per_sec'] > 0 assert a['query_engine']['written_docs_per_sec'] > 0 assert b['query_engine']['written_docs_per_sec'] > 0 elif a['id'][0] == 'table_server': assert a['db'] == b['db'] assert a['table'] == b['table'] assert a['server'] == b['server'] assert a['query_engine']['read_docs_per_sec'] > 0 assert b['query_engine']['read_docs_per_sec'] > 0 assert a['query_engine']['written_docs_per_sec'] > 0 assert b['query_engine']['written_docs_per_sec'] > 0 assert a['query_engine']['read_docs_total'] <= b['query_engine']['read_docs_total'] assert a['query_engine']['written_docs_total'] <= b['query_engine']['written_docs_total'] assert a['storage_engine']['disk']['read_bytes_per_sec'] > 0 assert a['storage_engine']['disk']['written_bytes_per_sec'] > 0 assert b['storage_engine']['disk']['read_bytes_per_sec'] > 0 assert b['storage_engine']['disk']['written_bytes_per_sec'] > 0 assert a['storage_engine']['disk']['read_bytes_total'] <= b['storage_engine']['disk']['read_bytes_total'] assert a['storage_engine']['disk']['written_bytes_total'] <= b['storage_engine']['disk']['written_bytes_total'] # even though cache size is 0, the server may use more while processing a query assert a['storage_engine']['cache']['in_use_bytes'] >= 0 assert b['storage_engine']['cache']['in_use_bytes'] >= 0 # unfortunately we can't make many assumptions about the disk space assert a['storage_engine']['disk']['space_usage']['data_bytes'] >= 0 assert a['storage_engine']['disk']['space_usage']['metadata_bytes'] >= 0 assert b['storage_engine']['disk']['space_usage']['data_bytes'] >= 0 assert b['storage_engine']['disk']['space_usage']['metadata_bytes'] >= 0 else: assert 
False, "Unrecognized stats row id: %s" % repr(a['id']) op = vcoptparse.OptParser() scenario_common.prepare_option_parser_mode_flags(op) _, command_prefix, serve_options = scenario_common.parse_mode_flags(op.parse(sys.argv)) # We use a cache size of 0 to force disk reads serve_options += ['--cache-size', '0'] with driver.Cluster(initial_servers=server_names, command_prefix=command_prefix, extra_options=serve_options) as cluster: conn = r.connect(cluster[0].host, cluster[0].driver_port) utils.print_with_time('Creating %d tables...' % len(table_names)) stop_event = multiprocessing.Event() # Store uuids for each table and server for verification purposes r.db_create(db).run(conn) tables = [ ] for name in table_names: info = { 'name': name } r.db(db).table_create(name, shards=2, replicas=1).run(conn) info['db_id'] = r.db(db).config()['id'].run(conn) info['id'] = r.db(db).table(info['name']).config()['id'].run(conn) info['workload'] = multiprocessing.Process(target=read_write_workload, args=(cluster[0].driver_port, name, stop_event)) info['workload'].start() tables.append(info) # Allow some time for the workload to get the stats going time.sleep(1) try: # Perform table scan, get each row individually, and check the integrity of the results all_stats = get_and_check_global_stats(tables, cluster, conn) also_stats = get_individual_stats(all_stats, conn) compare_global_and_individual_stats(all_stats, also_stats) # Shut down one server utils.print_with_time("Killing second server...") cluster[1].stop() time.sleep(5) # Perform table scan, observe that server 1 is now gone all_stats = get_and_check_global_stats(tables, [cluster[0]], conn) also_stats = get_individual_stats(all_stats, conn) compare_global_and_individual_stats(all_stats, also_stats) # Basic test of the `_debug_stats` table debug_stats_0 = r.db('rethinkdb').table('_debug_stats').get(cluster[0].uuid).run(conn) debug_stats_1 = r.db('rethinkdb').table('_debug_stats').get(cluster[1].uuid).run(conn) assert 
debug_stats_0["stats"]["eventloop"]["total"] > 0 assert debug_stats_1 is None # Restart server utils.print_with_time("Restarting second server...") cluster[1].start() time.sleep(5) # Perform table scan all_stats = get_and_check_global_stats(tables, cluster, conn) also_stats = get_individual_stats(all_stats, conn) compare_global_and_individual_stats(all_stats, also_stats) # Verify that 'total' stats are non-zero def check_non_zero_totals(stats): for row in stats: if row['id'][0] == 'server': if row['id'][1] == cluster[1].uuid: assert row['query_engine']['queries_total'] == 0 else: assert row['query_engine']['queries_total'] > 0 assert row['query_engine']['read_docs_total'] > 0 assert row['query_engine']['written_docs_total'] > 0 if row['id'][0] == 'table_server': assert row['query_engine']['read_docs_total'] > 0 assert row['query_engine']['written_docs_total'] > 0 assert row['storage_engine']['disk']['read_bytes_total'] > 0 assert row['storage_engine']['disk']['written_bytes_total'] > 0 check_non_zero_totals(all_stats) check_non_zero_totals(also_stats) finally: stop_event.set() for table in tables: table['workload'].join() utils.print_with_time("Checking that stats table is not writable...") length = r.db("rethinkdb").table("stats").count().run(conn) res = r.db("rethinkdb").table("stats").delete().run(conn) assert res["errors"] == length, res res = r.db("rethinkdb").table("stats").update({"foo": "bar"}).run(conn) assert res["errors"] == length, res res = r.db("rethinkdb").table("stats").insert({}).run(conn) assert res["errors"] == 1, res cluster.check_and_stop() utils.print_with_time('Done.')
agpl-3.0
nzavagli/UnrealPy
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/django-1.8.2/tests/validation/test_picklable.py
576
2010
import pickle from unittest import TestCase from django.core.exceptions import ValidationError class PickableValidationErrorTestCase(TestCase): def test_validationerror_is_picklable(self): original = ValidationError('a', code='something') unpickled = pickle.loads(pickle.dumps(original)) self.assertIs(unpickled, unpickled.error_list[0]) self.assertEqual(original.message, unpickled.message) self.assertEqual(original.code, unpickled.code) original = ValidationError('a', code='something') unpickled = pickle.loads(pickle.dumps(ValidationError(original))) self.assertIs(unpickled, unpickled.error_list[0]) self.assertEqual(original.message, unpickled.message) self.assertEqual(original.code, unpickled.code) original = ValidationError(['a', 'b']) unpickled = pickle.loads(pickle.dumps(original)) self.assertEqual(original.error_list[0].message, unpickled.error_list[0].message) self.assertEqual(original.error_list[1].message, unpickled.error_list[1].message) original = ValidationError(['a', 'b']) unpickled = pickle.loads(pickle.dumps(ValidationError(original))) self.assertEqual(original.error_list[0].message, unpickled.error_list[0].message) self.assertEqual(original.error_list[1].message, unpickled.error_list[1].message) original = ValidationError([ValidationError('a'), ValidationError('b')]) unpickled = pickle.loads(pickle.dumps(original)) self.assertIs(unpickled.args[0][0], unpickled.error_list[0]) self.assertEqual(original.error_list[0].message, unpickled.error_list[0].message) self.assertEqual(original.error_list[1].message, unpickled.error_list[1].message) message_dict = {'field1': ['a', 'b'], 'field2': ['c', 'd']} original = ValidationError(message_dict) unpickled = pickle.loads(pickle.dumps(original)) self.assertEqual(unpickled.message_dict, message_dict)
mit
mdrumond/tensorflow
tensorflow/python/ops/control_flow_grad.py
16
9130
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Gradients for operators defined in control_flow_ops.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops # go/tf-wildcard-import # pylint: disable=wildcard-import,undefined-variable from tensorflow.python.ops.control_flow_ops import * from tensorflow.python.ops.gen_control_flow_ops import * # pylint: enable=wildcard-import def _SwitchGrad(op, *grad): """Gradients for a Switch op is calculated using a Merge op. If the switch is a loop switch, it will be visited twice. We create the merge on the first visit, and update the other input of the merge on the second visit. A next_iteration is also added on second visit. """ graph = ops.get_default_graph() # pylint: disable=protected-access op_ctxt = op._get_control_flow_context() grad_ctxt = graph._get_control_flow_context() # pylint: enable=protected-access if isinstance(op_ctxt, WhileContext): merge_grad = grad_ctxt.grad_state.switch_map.get(op) if merge_grad is not None: # This is the second time this Switch is visited. 
It comes from # the non-exit branch of the Switch, so update the second input # to the Merge. # TODO(yuanbyu): Perform shape inference with this new input. if grad[1] is not None: # pylint: disable=protected-access control_flow_ops._AddNextAndBackEdge(merge_grad, grad[1]) # pylint: enable=protected-access return None, None elif grad[0] is not None: # This is the first time this Switch is visited. It comes from # the Exit branch, which is grad[0]. grad[1] is empty at this point. # Use grad[0] for both inputs to merge for now, but update the second # input of merge when we see this Switch the second time. merge_grad = merge([grad[0], grad[0]], name="b_switch")[0] grad_ctxt.grad_state.switch_map[op] = merge_grad return merge_grad, None else: # This is the first time this Switch is visited. It comes from the # Identity branch. Such a Switch has `None` gradient for the Exit branch, # meaning the output is not differentiable. return None, None elif isinstance(op_ctxt, CondContext): good_grad = grad[op_ctxt.branch] zero_grad = grad[1 - op_ctxt.branch] # At this point, we have created zero_grad guarded by the right switch. # Unfortunately, we may still get None here for not trainable data types. 
if zero_grad is None: return None, None return merge([good_grad, zero_grad], name="cond_grad")[0], None else: false_grad = switch(grad[0], op.inputs[1])[0] true_grad = switch(grad[1], op.inputs[1])[1] return merge([false_grad, true_grad])[0], None ops.RegisterGradient("Switch")(_SwitchGrad) ops.RegisterGradient("RefSwitch")(_SwitchGrad) @ops.RegisterGradient("Merge") def _MergeGrad(op, grad, _): """Gradients for a Merge op are calculated using a Switch op.""" input_op = op.inputs[0].op graph = ops.get_default_graph() # pylint: disable=protected-access op_ctxt = control_flow_ops._GetOutputContext(input_op) grad_ctxt = graph._get_control_flow_context() # pylint: enable=protected-access if isinstance(op_ctxt, WhileContext): # pylint: disable=protected-access return control_flow_ops._SwitchRefOrTensor(grad, grad_ctxt.pivot) # pylint: enable=protected-access elif isinstance(op_ctxt, CondContext): pred = op_ctxt.pred if grad_ctxt and grad_ctxt.grad_state: # This Merge node is part of a cond within a loop. # The backprop needs to have the value of this predicate for every # iteration. So we must have its values accumulated in the forward, and # use the accumulated values as the predicate for this backprop switch. grad_state = grad_ctxt.grad_state real_pred = grad_state.history_map.get(pred.name) if real_pred is None: # Remember the value of pred for every iteration. grad_ctxt = grad_state.grad_context grad_ctxt.Exit() history_pred = grad_state.AddForwardAccumulator(pred) grad_ctxt.Enter() # Add the stack pop op. If pred.op is in a (outer) CondContext, # the stack pop will be guarded with a switch. 
real_pred = grad_state.AddBackpropAccumulatedValue(history_pred, pred) grad_state.history_map[pred.name] = real_pred pred = real_pred # pylint: disable=protected-access return control_flow_ops._SwitchRefOrTensor(grad, pred, name="cond_grad") # pylint: enable=protected-access else: num_inputs = len(op.inputs) cond = [math_ops.equal(op.outputs[1], i) for i in xrange(num_inputs)] # pylint: disable=protected-access return [control_flow_ops._SwitchRefOrTensor(grad, cond[i])[1] for i in xrange(num_inputs)] # pylint: enable=protected-access @ops.RegisterGradient("RefMerge") def _RefMergeGrad(op, grad, _): return _MergeGrad(op, grad, _) @ops.RegisterGradient("Exit") def _ExitGrad(op, grad): """Gradients for an exit op are calculated using an Enter op.""" graph = ops.get_default_graph() # pylint: disable=protected-access grad_ctxt = graph._get_control_flow_context() # pylint: enable=protected-access if not grad_ctxt.back_prop: # The flag `back_prop` is set by users to suppress gradient # computation for this loop. If the attribute `back_prop` is false, # no gradient computation. 
return None # pylint: disable=protected-access if op._get_control_flow_context().grad_state: raise TypeError("Second-order gradient for while loops not supported.") # pylint: enable=protected-access if isinstance(grad, ops.Tensor): grad_ctxt.AddName(grad.name) else: if not isinstance(grad, (ops.IndexedSlices, sparse_tensor.SparseTensor)): raise TypeError("Type %s not supported" % type(grad)) grad_ctxt.AddName(grad.values.name) grad_ctxt.AddName(grad.indices.name) dense_shape = grad.dense_shape if dense_shape is not None: grad_ctxt.AddName(dense_shape.name) grad_ctxt.Enter() # pylint: disable=protected-access result = control_flow_ops._Enter( grad, grad_ctxt.name, is_constant=False, parallel_iterations=grad_ctxt.parallel_iterations, name="b_exit") # pylint: enable=protected-access grad_ctxt.loop_enters.append(result) grad_ctxt.Exit() return result ops.RegisterGradient("RefExit")(_ExitGrad) @ops.RegisterGradient("NextIteration") def _NextIterationGrad(_, grad): """A forward next_iteration is translated into a backprop identity. Note that the backprop next_iteration is added in switch grad. """ return grad @ops.RegisterGradient("RefNextIteration") def _RefNextIterationGrad(_, grad): return _NextIterationGrad(_, grad) @ops.RegisterGradient("Enter") def _EnterGrad(op, grad): """Gradients for an Enter are calculated using an Exit op. For loop variables, grad is the gradient so just add an exit. For loop invariants, we need to add an accumulator loop. """ graph = ops.get_default_graph() # pylint: disable=protected-access grad_ctxt = graph._get_control_flow_context() # pylint: enable=protected-access if not grad_ctxt.back_prop: # Skip gradient computation, if the attribute `back_prop` is false. return grad if grad_ctxt.grad_state is None: # Pass the gradient through if we are not in a gradient while context. return grad if op.get_attr("is_constant"): # Add a gradient accumulator for each loop invariant. 
if isinstance(grad, ops.Tensor): result = grad_ctxt.AddBackpropAccumulator(op, grad) elif isinstance(grad, ops.IndexedSlices): result = grad_ctxt.AddBackpropIndexedSlicesAccumulator(op, grad) else: # TODO(yuanbyu, lukasr): Add support for SparseTensor. raise TypeError("Type %s not supported" % type(grad)) else: result = exit(grad) grad_ctxt.loop_exits.append(result) grad_ctxt.ExitResult([result]) return result @ops.RegisterGradient("RefEnter") def _RefEnterGrad(op, grad): return _EnterGrad(op, grad) @ops.RegisterGradient("LoopCond") def _LoopCondGrad(_): """Stop backprop for the predicate of a while loop.""" return None
apache-2.0
taedori81/wagtail
wagtail/wagtailimages/tests/test_rich_text.py
13
2051
from django.test import TestCase from bs4 import BeautifulSoup from mock import patch from wagtail.wagtailimages.rich_text import ImageEmbedHandler class TestImageEmbedHandler(TestCase): def test_get_db_attributes(self): soup = BeautifulSoup( '<b data-id="test-id" data-format="test-format" data-alt="test-alt">foo</b>' ) tag = soup.b result = ImageEmbedHandler.get_db_attributes(tag) self.assertEqual(result, {'alt': 'test-alt', 'id': 'test-id', 'format': 'test-format'}) def test_expand_db_attributes_page_does_not_exist(self): result = ImageEmbedHandler.expand_db_attributes( {'id': 0}, False ) self.assertEqual(result, '<img>') @patch('wagtail.wagtailimages.models.Image') @patch('django.core.files.File') def test_expand_db_attributes_not_for_editor(self, mock_file, mock_image): result = ImageEmbedHandler.expand_db_attributes( {'id': 1, 'alt': 'test-alt', 'format': 'left'}, False ) self.assertIn('<img class="richtext-image left"', result) @patch('wagtail.wagtailimages.models.Image') @patch('django.core.files.File') def test_expand_db_attributes_for_editor(self, mock_file, mock_image): result = ImageEmbedHandler.expand_db_attributes( {'id': 1, 'alt': 'test-alt', 'format': 'left'}, True ) self.assertIn('<img data-embedtype="image" data-id="1" data-format="left" data-alt="test-alt" class="richtext-image left"', result) @patch('wagtail.wagtailimages.models.Image') @patch('django.core.files.File') def test_expand_db_attributes_for_editor_throws_exception(self, mock_file, mock_image): result = ImageEmbedHandler.expand_db_attributes( {'id': 1, 'format': 'left'}, True ) self.assertEqual(result, '')
bsd-3-clause
bolkedebruin/airflow
tests/providers/datadog/hooks/test_datadog.py
1
4769
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import json import unittest from unittest import mock from airflow.exceptions import AirflowException from airflow.models import Connection from airflow.providers.datadog.hooks.datadog import DatadogHook APP_KEY = 'app_key' API_KEY = 'api_key' METRIC_NAME = 'metric' DATAPOINT = 7 TAGS = ['tag'] TYPE = 'rate' INTERVAL = 30 TITLE = 'title' TEXT = 'text' AGGREGATION_KEY = 'aggregation-key' ALERT_TYPE = 'warning' DATE_HAPPENED = 12345 HANDLE = 'handle' PRIORITY = 'normal' RELATED_EVENT_ID = 7 DEVICE_NAME = 'device-name' class TestDatadogHook(unittest.TestCase): @mock.patch('airflow.providers.datadog.hooks.datadog.initialize') @mock.patch('airflow.providers.datadog.hooks.datadog.DatadogHook.get_connection') def setUp(self, mock_get_connection, mock_initialize): mock_get_connection.return_value = Connection(extra=json.dumps({ 'app_key': APP_KEY, 'api_key': API_KEY, })) self.hook = DatadogHook() @mock.patch('airflow.providers.datadog.hooks.datadog.initialize') @mock.patch('airflow.providers.datadog.hooks.datadog.DatadogHook.get_connection') def test_api_key_required(self, mock_get_connection, mock_initialize): mock_get_connection.return_value = Connection() with 
self.assertRaises(AirflowException) as ctx: DatadogHook() self.assertEqual(str(ctx.exception), 'api_key must be specified in the Datadog connection details') def test_validate_response_valid(self): try: self.hook.validate_response({'status': 'ok'}) except AirflowException: self.fail('Unexpected AirflowException raised') def test_validate_response_invalid(self): with self.assertRaises(AirflowException): self.hook.validate_response({'status': 'error'}) @mock.patch('airflow.providers.datadog.hooks.datadog.api.Metric.send') def test_send_metric(self, mock_send): mock_send.return_value = {'status': 'ok'} self.hook.send_metric( METRIC_NAME, DATAPOINT, tags=TAGS, type_=TYPE, interval=INTERVAL, ) mock_send.assert_called_once_with( metric=METRIC_NAME, points=DATAPOINT, host=self.hook.host, tags=TAGS, type=TYPE, interval=INTERVAL, ) @mock.patch('airflow.providers.datadog.hooks.datadog.api.Metric.query') @mock.patch('airflow.providers.datadog.hooks.datadog.time.time') def test_query_metric(self, mock_time, mock_query): now = 12345 mock_time.return_value = now mock_query.return_value = {'status': 'ok'} self.hook.query_metric('query', 60, 30) mock_query.assert_called_once_with( start=now - 60, end=now - 30, query='query', ) @mock.patch('airflow.providers.datadog.hooks.datadog.api.Event.create') def test_post_event(self, mock_create): mock_create.return_value = {'status': 'ok'} self.hook.post_event( TITLE, TEXT, aggregation_key=AGGREGATION_KEY, alert_type=ALERT_TYPE, date_happened=DATE_HAPPENED, handle=HANDLE, priority=PRIORITY, related_event_id=RELATED_EVENT_ID, tags=TAGS, device_name=DEVICE_NAME, ) mock_create.assert_called_once_with( title=TITLE, text=TEXT, aggregation_key=AGGREGATION_KEY, alert_type=ALERT_TYPE, date_happened=DATE_HAPPENED, handle=HANDLE, priority=PRIORITY, related_event_id=RELATED_EVENT_ID, tags=TAGS, host=self.hook.host, device_name=DEVICE_NAME, source_type_name=self.hook.source_type_name, ) if __name__ == '__main__': unittest.main()
apache-2.0
wbrefvem/github3.py
tests/test_notifications.py
5
3078
import github3 import datetime from tests.utils import BaseCase, load class TestThread(BaseCase): def __init__(self, methodName='runTest'): super(TestThread, self).__init__(methodName) self.thread = github3.notifications.Thread(load('notification')) self.api = ("https://api.github.com/notifications/threads/6169361") def test_equality(self): t = github3.notifications.Thread(load('notification')) assert self.thread == t t._uniq = 1 assert self.thread != t def test_last_read_at(self): json = self.thread.as_dict().copy() json['last_read_at'] = '2013-12-31T23:59:59Z' t = github3.notifications.Thread(json) assert isinstance(t.last_read_at, datetime.datetime) def test_repr(self): assert repr(self.thread) == '<Thread [{0}]>'.format( self.thread.subject.get('title')) def test_delete_subscription(self): self.response('', 204) self.delete(self.api + '/subscription') assert self.thread.delete_subscription() self.mock_assertions() def test_is_unread(self): assert self.thread.is_unread() == self.thread.unread def test_mark(self): self.response('', 205) self.patch(self.api) self.conf = {} assert self.thread.mark() self.mock_assertions() def test_set_subscription(self): self.response('subscription') self.put(self.api + '/subscription') self.conf = {'data': {'subscribed': True, 'ignored': False}} assert isinstance(self.thread.set_subscription(True, False), github3.notifications.Subscription) self.mock_assertions() def test_subscription(self): self.response('subscription') self.get(self.api + '/subscription') assert isinstance(self.thread.subscription(), github3.notifications.Subscription) self.mock_assertions() class TestSubscription(BaseCase): def __init__(self, methodName='runTest'): super(TestSubscription, self).__init__(methodName) self.subscription = github3.notifications.Subscription( load('subscription')) self.api = ("https://api.github.com/notifications/threads/5864188/" "subscription") def test_repr(self): assert isinstance(repr(self.subscription), str) def 
test_delete(self): self.response('', 204) self.delete(self.api) assert self.subscription.delete() self.mock_assertions() def test_is_ignored(self): assert self.subscription.is_ignored() == self.subscription.ignored def test_is_subscription(self): subbed = self.subscription.is_subscribed() assert subbed == self.subscription.subscribed def test_set(self): self.response('subscription') self.put(self.api) self.conf = {'data': {'subscribed': True, 'ignored': False}} assert self.subscription.set(True, False) is None self.mock_assertions()
bsd-3-clause
chrisfilda/edx_platform
lms/envs/common.py
1
51252
# -*- coding: utf-8 -*- """ This is the common settings file, intended to set sane defaults. If you have a piece of configuration that's dependent on a set of feature flags being set, then create a function that returns the calculated value based on the value of FEATURES[...]. Modules that extend this one can change the feature configuration in an environment specific config file and re-calculate those values. We should make a method that calls all these config methods so that you just make one call at the end of your site-specific dev file to reset all the dependent variables (like INSTALLED_APPS) for you. Longer TODO: 1. Right now our treatment of static content in general and in particular course-specific static content is haphazard. 2. We should have a more disciplined approach to feature flagging, even if it just means that we stick them in a dict called FEATURES. 3. We need to handle configuration for multiple courses. This could be as multiple sites, but we do need a way to map their data assets. """ # We intentionally define lots of variables that aren't used, and # want to import all variables from base settings files # pylint: disable=W0401, W0611, W0614, C0103 import sys import os import imp import json from path import path from .discussionsettings import * from lms.lib.xblock.mixin import LmsBlockMixin ################################### FEATURES ################################### # The display name of the platform to be used in templates/emails/etc. PLATFORM_NAME = "edX" CC_MERCHANT_NAME = PLATFORM_NAME COURSEWARE_ENABLED = True ENABLE_JASMINE = False DISCUSSION_SETTINGS = { 'MAX_COMMENT_DEPTH': 2, } # Features FEATURES = { 'SAMPLE': False, 'USE_DJANGO_PIPELINE': True, 'DISPLAY_DEBUG_INFO_TO_STAFF': True, 'DISPLAY_HISTOGRAMS_TO_STAFF': False, # For large courses this slows down courseware access for staff. 
'REROUTE_ACTIVATION_EMAIL': False, # nonempty string = address for all activation emails 'DEBUG_LEVEL': 0, # 0 = lowest level, least verbose, 255 = max level, most verbose ## DO NOT SET TO True IN THIS FILE ## Doing so will cause all courses to be released on production 'DISABLE_START_DATES': False, # When True, all courses will be active, regardless of start date # When True, will only publicly list courses by the subdomain. Expects you # to define COURSE_LISTINGS, a dictionary mapping subdomains to lists of # course_ids (see dev_int.py for an example) 'SUBDOMAIN_COURSE_LISTINGS': False, # When True, will override certain branding with university specific values # Expects a SUBDOMAIN_BRANDING dictionary that maps the subdomain to the # university to use for branding purposes 'SUBDOMAIN_BRANDING': False, 'FORCE_UNIVERSITY_DOMAIN': False, # set this to the university domain to use, as an override to HTTP_HOST # set to None to do no university selection # for consistency in user-experience, keep the value of the following 3 settings # in sync with the corresponding ones in cms/envs/common.py 'ENABLE_DISCUSSION_SERVICE': True, 'ENABLE_TEXTBOOK': True, 'ENABLE_STUDENT_NOTES': True, # enables the student notes API and UI. # discussion home panel, which includes a subscription on/off setting for discussion digest emails. # this should remain off in production until digest notifications are online. 'ENABLE_DISCUSSION_HOME_PANEL': False, 'ENABLE_PSYCHOMETRICS': False, # real-time psychometrics (eg item response theory analysis in instructor dashboard) 'ENABLE_DJANGO_ADMIN_SITE': True, # set true to enable django's admin site, even on prod (e.g. 
for course ops) 'ENABLE_SQL_TRACKING_LOGS': False, 'ENABLE_LMS_MIGRATION': False, 'ENABLE_MANUAL_GIT_RELOAD': False, 'ENABLE_MASQUERADE': True, # allow course staff to change to student view of courseware 'ENABLE_SYSADMIN_DASHBOARD': False, # sysadmin dashboard, to see what courses are loaded, to delete & load courses 'DISABLE_LOGIN_BUTTON': False, # used in systems where login is automatic, eg MIT SSL # extrernal access methods 'ACCESS_REQUIRE_STAFF_FOR_COURSE': False, 'AUTH_USE_OPENID': False, 'AUTH_USE_CERTIFICATES': False, 'AUTH_USE_OPENID_PROVIDER': False, # Even though external_auth is in common, shib assumes the LMS views / urls, so it should only be enabled # in LMS 'AUTH_USE_SHIB': False, 'AUTH_USE_CAS': False, # This flag disables the requirement of having to agree to the TOS for users registering # with Shib. Feature was requested by Stanford's office of general counsel 'SHIB_DISABLE_TOS': False, # Can be turned off if course lists need to be hidden. Effects views and templates. 'COURSES_ARE_BROWSABLE': True, # Enables ability to restrict enrollment in specific courses by the user account login method 'RESTRICT_ENROLL_BY_REG_METHOD': False, # analytics experiments 'ENABLE_INSTRUCTOR_ANALYTICS': False, # Enables the LMS bulk email feature for course staff 'ENABLE_INSTRUCTOR_EMAIL': True, # If True and ENABLE_INSTRUCTOR_EMAIL: Forces email to be explicitly turned on # for each course via django-admin interface. # If False and ENABLE_INSTRUCTOR_EMAIL: Email will be turned on by default # for all Mongo-backed courses. 'REQUIRE_COURSE_EMAIL_AUTH': True, # enable analytics server. # WARNING: THIS SHOULD ALWAYS BE SET TO FALSE UNDER NORMAL # LMS OPERATION. See analytics.py for details about what # this does. 'RUN_AS_ANALYTICS_SERVER_ENABLED': False, # Flip to True when the YouTube iframe API breaks (again) 'USE_YOUTUBE_OBJECT_API': False, # Give a UI to show a student's submission history in a problem by the # Staff Debug tool. 
'ENABLE_STUDENT_HISTORY_VIEW': True, # segment.io for LMS--need to explicitly turn it on for production. 'SEGMENT_IO_LMS': False, # Provide a UI to allow users to submit feedback from the LMS (left-hand help modal) 'ENABLE_FEEDBACK_SUBMISSION': False, # Turn on a page that lets staff enter Python code to be run in the # sandbox, for testing whether it's enabled properly. 'ENABLE_DEBUG_RUN_PYTHON': False, # Enable URL that shows information about the status of variuous services 'ENABLE_SERVICE_STATUS': False, # Toggle to indicate use of a custom theme 'USE_CUSTOM_THEME': False, # Don't autoplay videos for students 'AUTOPLAY_VIDEOS': False, # Enable instructor dash to submit background tasks 'ENABLE_INSTRUCTOR_BACKGROUND_TASKS': True, # Enable instructor to assign individual due dates 'INDIVIDUAL_DUE_DATES': False, # Enable instructor dash beta version link 'ENABLE_INSTRUCTOR_BETA_DASHBOARD': True, # Toggle to enable certificates of courses on dashboard 'ENABLE_VERIFIED_CERTIFICATES': False, # Allow use of the hint managment instructor view. 
'ENABLE_HINTER_INSTRUCTOR_VIEW': False, # for load testing 'AUTOMATIC_AUTH_FOR_TESTING': False, # Toggle to enable chat availability (configured on a per-course # basis in Studio) 'ENABLE_CHAT': False, # Allow users to enroll with methods other than just honor code certificates 'MULTIPLE_ENROLLMENT_ROLES': False, # Toggle the availability of the shopping cart page 'ENABLE_SHOPPING_CART': False, # Toggle storing detailed billing information 'STORE_BILLING_INFO': False, # Enable flow for payments for course registration (DIFFERENT from verified student flow) 'ENABLE_PAID_COURSE_REGISTRATION': False, # Automatically approve student identity verification attempts 'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': False, # Disable instructor dash buttons for downloading course data # when enrollment exceeds this number 'MAX_ENROLLMENT_INSTR_BUTTONS': 200, # Grade calculation started from the new instructor dashboard will write # grades CSV files to S3 and give links for downloads. 'ENABLE_S3_GRADE_DOWNLOADS': False, # whether to use password policy enforcement or not 'ENFORCE_PASSWORD_POLICY': False, # Give course staff unrestricted access to grade downloads (if set to False, # only edX superusers can perform the downloads) 'ALLOW_COURSE_STAFF_GRADE_DOWNLOADS': False, 'ENABLED_PAYMENT_REPORTS': ["refund_report", "itemized_purchase_report", "university_revenue_share", "certificate_status"], # Turn off account locking if failed login attempts exceeds a limit 'ENABLE_MAX_FAILED_LOGIN_ATTEMPTS': False, # Hide any Personally Identifiable Information from application logs 'SQUELCH_PII_IN_LOGS': False, # Toggle embargo functionality 'EMBARGO': False, # Whether the Wiki subsystem should be accessible via the direct /wiki/ paths. Setting this to True means # that people can submit content and modify the Wiki in any arbitrary manner. 
We're leaving this as True in the # defaults, so that we maintain current behavior 'ALLOW_WIKI_ROOT_ACCESS': True, # Turn on/off Microsites feature 'USE_MICROSITES': False, # Turn on third-party auth. Disabled for now because full implementations are not yet available. Remember to syncdb # if you enable this; we don't create tables by default. 'ENABLE_THIRD_PARTY_AUTH': False, # Toggle to enable alternate urls for marketing links 'ENABLE_MKTG_SITE': False, # Prevent concurrent logins per user 'PREVENT_CONCURRENT_LOGINS': False, # Turn off Advanced Security by default 'ADVANCED_SECURITY': False, # Show a "Download your certificate" on the Progress page if the lowest # nonzero grade cutoff is met 'SHOW_PROGRESS_SUCCESS_BUTTON': False, } # Used for A/B testing DEFAULT_GROUPS = [] # If this is true, random scores will be generated for the purpose of debugging the profile graphs GENERATE_PROFILE_SCORES = False # Used with XQueue XQUEUE_WAITTIME_BETWEEN_REQUESTS = 5 # seconds ############################# SET PATH INFORMATION ############################# PROJECT_ROOT = path(__file__).abspath().dirname().dirname() # /edx-platform/lms REPO_ROOT = PROJECT_ROOT.dirname() COMMON_ROOT = REPO_ROOT / "common" ENV_ROOT = REPO_ROOT.dirname() # virtualenv dir /edx-platform is in COURSES_ROOT = ENV_ROOT / "data" DATA_DIR = COURSES_ROOT # TODO: Remove the rest of the sys.path modification here and in cms/envs/common.py sys.path.append(REPO_ROOT) sys.path.append(PROJECT_ROOT / 'djangoapps') sys.path.append(COMMON_ROOT / 'djangoapps') sys.path.append(COMMON_ROOT / 'lib') # For Node.js system_node_path = os.environ.get("NODE_PATH", REPO_ROOT / 'node_modules') node_paths = [ COMMON_ROOT / "static/js/vendor", COMMON_ROOT / "static/coffee/src", system_node_path, ] NODE_PATH = ':'.join(node_paths) # For geolocation ip database GEOIP_PATH = REPO_ROOT / "common/static/data/geoip/GeoIP.dat" # Where to look for a status message STATUS_MESSAGE_PATH = ENV_ROOT / "status_message.json" 
############################ OpenID Provider ################################## OPENID_PROVIDER_TRUSTED_ROOTS = ['cs50.net', '*.cs50.net'] ################################## EDX WEB ##################################### # This is where we stick our compiled template files. Most of the app uses Mako # templates from tempdir import mkdtemp_clean MAKO_MODULE_DIR = mkdtemp_clean('mako') MAKO_TEMPLATES = {} MAKO_TEMPLATES['main'] = [PROJECT_ROOT / 'templates', COMMON_ROOT / 'templates', COMMON_ROOT / 'lib' / 'capa' / 'capa' / 'templates', COMMON_ROOT / 'djangoapps' / 'pipeline_mako' / 'templates'] # This is where Django Template lookup is defined. There are a few of these # still left lying around. TEMPLATE_DIRS = [ PROJECT_ROOT / "templates", COMMON_ROOT / 'templates', COMMON_ROOT / 'lib' / 'capa' / 'capa' / 'templates', COMMON_ROOT / 'djangoapps' / 'pipeline_mako' / 'templates', ] TEMPLATE_CONTEXT_PROCESSORS = ( 'django.core.context_processors.request', 'django.core.context_processors.static', 'django.contrib.messages.context_processors.messages', 'django.core.context_processors.i18n', 'django.contrib.auth.context_processors.auth', # this is required for admin 'django.core.context_processors.csrf', # Added for django-wiki 'django.core.context_processors.media', 'django.core.context_processors.tz', 'django.contrib.messages.context_processors.messages', 'sekizai.context_processors.sekizai', # Hack to get required link URLs to password reset templates 'edxmako.shortcuts.marketing_link_context_processor', # Shoppingcart processor (detects if request.user has a cart) 'shoppingcart.context_processor.user_has_cart_context_processor', ) # use the ratelimit backend to prevent brute force attacks AUTHENTICATION_BACKENDS = ( 'ratelimitbackend.backends.RateLimitModelBackend', ) STUDENT_FILEUPLOAD_MAX_SIZE = 4 * 1000 * 1000 # 4 MB MAX_FILEUPLOADS_PER_INPUT = 20 # FIXME: # We should have separate S3 staged URLs in case we need to make changes to # these assets and test them. 
LIB_URL = '/static/js/' # Dev machines shouldn't need the book # BOOK_URL = '/static/book/' BOOK_URL = 'https://mitxstatic.s3.amazonaws.com/book_images/' # For AWS deploys RSS_TIMEOUT = 600 # Configuration option for when we want to grab server error pages STATIC_GRAB = False DEV_CONTENT = True EDX_ROOT_URL = '' LOGIN_REDIRECT_URL = EDX_ROOT_URL + '/accounts/login' LOGIN_URL = EDX_ROOT_URL + '/accounts/login' COURSE_NAME = "6.002_Spring_2012" COURSE_NUMBER = "6.002x" COURSE_TITLE = "Circuits and Electronics" ### Dark code. Should be enabled in local settings for devel. ENABLE_MULTICOURSE = False # set to False to disable multicourse display (see lib.util.views.edXhome) WIKI_ENABLED = False ### COURSE_DEFAULT = '6.002x_Fall_2012' COURSE_SETTINGS = { '6.002x_Fall_2012': { 'number': '6.002x', 'title': 'Circuits and Electronics', 'xmlpath': '6002x/', 'location': 'i4x://edx/6002xs12/course/6.002x_Fall_2012', } } # IP addresses that are allowed to reload the course, etc. # TODO (vshnayder): Will probably need to change as we get real access control in. LMS_MIGRATION_ALLOWED_IPS = [] ############################## EVENT TRACKING ################################# # FIXME: Should we be doing this truncation? TRACK_MAX_EVENT = 50000 DEBUG_TRACK_LOG = False TRACKING_BACKENDS = { 'logger': { 'ENGINE': 'track.backends.logger.LoggerBackend', 'OPTIONS': { 'name': 'tracking' } } } # We're already logging events, and we don't want to capture user # names/passwords. Heartbeat events are likely not interesting. TRACKING_IGNORE_URL_PATTERNS = [r'^/event', r'^/login', r'^/heartbeat'] EVENT_TRACKING_ENABLED = True EVENT_TRACKING_BACKENDS = { 'logger': { 'ENGINE': 'eventtracking.backends.logger.LoggerBackend', 'OPTIONS': { 'name': 'tracking', 'max_event_size': TRACK_MAX_EVENT, } } } EVENT_TRACKING_PROCESSORS = [ { 'ENGINE': 'track.shim.LegacyFieldMappingProcessor' } ] # Backwards compatibility with ENABLE_SQL_TRACKING_LOGS feature flag. 
# In the future, adding the backend to TRACKING_BACKENDS should be enough. if FEATURES.get('ENABLE_SQL_TRACKING_LOGS'): TRACKING_BACKENDS.update({ 'sql': { 'ENGINE': 'track.backends.django.DjangoBackend' } }) EVENT_TRACKING_BACKENDS.update({ 'sql': { 'ENGINE': 'track.backends.django.DjangoBackend' } }) ######################## GOOGLE ANALYTICS ########################### GOOGLE_ANALYTICS_ACCOUNT = 'GOOGLE_ANALYTICS_ACCOUNT_DUMMY' GOOGLE_ANALYTICS_LINKEDIN = 'GOOGLE_ANALYTICS_LINKEDIN_DUMMY' ######################## subdomain specific settings ########################### COURSE_LISTINGS = {} SUBDOMAIN_BRANDING = {} VIRTUAL_UNIVERSITIES = [] ############################### XModule Store ################################## MODULESTORE = { 'default': { 'ENGINE': 'xmodule.modulestore.xml.XMLModuleStore', 'OPTIONS': { 'data_dir': DATA_DIR, 'default_class': 'xmodule.hidden_module.HiddenDescriptor', } } } CONTENTSTORE = None DOC_STORE_CONFIG = { 'host': 'localhost', 'db': 'xmodule', 'collection': 'modulestore', } ############# XBlock Configuration ########## # Import after sys.path fixup from xmodule.modulestore.inheritance import InheritanceMixin from xmodule.modulestore import prefer_xmodules from xmodule.x_module import XModuleMixin # This should be moved into an XBlock Runtime/Application object # once the responsibility of XBlock creation is moved out of modulestore - cpennington XBLOCK_MIXINS = (LmsBlockMixin, InheritanceMixin, XModuleMixin) # Allow any XBlock in the LMS XBLOCK_SELECT_FUNCTION = prefer_xmodules #################### Python sandbox ############################################ CODE_JAIL = { # Path to a sandboxed Python executable. None means don't bother. 'python_bin': None, # User to run as in the sandbox. 'user': 'sandbox', # Configurable limits. 'limits': { # How many CPU seconds can jailed code use? 'CPU': 1, }, } # Some courses are allowed to run unsafe code. 
This is a list of regexes, one # of them must match the course id for that course to run unsafe code. # # For example: # # COURSES_WITH_UNSAFE_CODE = [ # r"Harvard/XY123.1/.*" # ] COURSES_WITH_UNSAFE_CODE = [] ############################### DJANGO BUILT-INS ############################### # Change DEBUG/TEMPLATE_DEBUG in your environment settings files, not here DEBUG = False TEMPLATE_DEBUG = False USE_TZ = True # CMS base CMS_BASE = 'localhost:8001' # Site info SITE_ID = 1 SITE_NAME = "edx.org" HTTPS = 'on' ROOT_URLCONF = 'lms.urls' # NOTE: Please set ALLOWED_HOSTS to some sane value, as we do not allow the default '*' # Platform Email EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' DEFAULT_FROM_EMAIL = 'registration@example.com' DEFAULT_FEEDBACK_EMAIL = 'feedback@example.com' SERVER_EMAIL = 'devops@example.com' TECH_SUPPORT_EMAIL = 'technical@example.com' CONTACT_EMAIL = 'info@example.com' BUGS_EMAIL = 'bugs@example.com' ADMINS = () MANAGERS = ADMINS # Static content STATIC_URL = '/static/' ADMIN_MEDIA_PREFIX = '/static/admin/' STATIC_ROOT = ENV_ROOT / "staticfiles" STATICFILES_DIRS = [ COMMON_ROOT / "static", PROJECT_ROOT / "static", ] FAVICON_PATH = 'images/favicon.ico' # Locale/Internationalization TIME_ZONE = 'America/New_York' # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name LANGUAGE_CODE = 'en' # http://www.i18nguy.com/unicode/language-identifiers.html # Sourced from http://www.localeplanet.com/icu/ and wikipedia LANGUAGES = ( ('en', u'English'), ('eo', u'Dummy Language (Esperanto)'), # Dummy languaged used for testing ('fake2', u'Fake translations'), # Another dummy language for testing (not pushed to prod) ('ar', u'العربية'), # Arabic ('az', u'azərbaycanca'), # Azerbaijani ('bg-bg', u'български (България)'), # Bulgarian (Bulgaria) ('bn-bd', u'বাংলা (বাংলাদেশ)'), # Bengali (Bangladesh) ('bn-in', u'বাংলা (ভারত)'), # Bengali (India) ('bs', u'bosanski'), # Bosnian ('ca', u'Català'), # Catalan ('ca@valencia', u'Català (València)'), # 
Catalan (Valencia) ('cs', u'Čeština'), # Czech ('cy', u'Cymraeg'), # Welsh ('da', u'dansk'), # Danish ('de-de', u'Deutsch (Deutschland)'), # German (Germany) ('el', u'Ελληνικά'), # Greek ('en@lolcat', u'LOLCAT English'), # LOLCAT English ('en@pirate', u'Pirate English'), # Pirate English ('es-419', u'Español (Latinoamérica)'), # Spanish (Latin America) ('es-ar', u'Español (Argentina)'), # Spanish (Argentina) ('es-ec', u'Español (Ecuador)'), # Spanish (Ecuador) ('es-es', u'Español (España)'), # Spanish (Spain) ('es-mx', u'Español (México)'), # Spanish (Mexico) ('es-pe', u'Español (Perú)'), # Spanish (Peru) ('et-ee', u'Eesti (Eesti)'), # Estonian (Estonia) ('eu-es', u'euskara (Espainia)'), # Basque (Spain) ('fa', u'فارسی'), # Persian ('fa-ir', u'فارسی (ایران)'), # Persian (Iran) ('fi-fi', u'Suomi (Suomi)'), # Finnish (Finland) ('fr', u'Français'), # French ('gl', u'Galego'), # Galician ('he', u'עברית'), # Hebrew ('hi', u'हिन्दी'), # Hindi ('hu', u'magyar'), # Hungarian ('hy-am', u'Հայերեն (Հայաստան)'), # Armenian (Armenia) ('id', u'Bahasa Indonesia'), # Indonesian ('it-it', u'Italiano (Italia)'), # Italian (Italy) ('ja-jp', u'日本語(日本)'), # Japanese (Japan) ('kk-kz', u'қазақ тілі (Қазақстан)'), # Kazakh (Kazakhstan) ('km-kh', u'ភាសាខ្មែរ (កម្ពុជា)'), # Khmer (Cambodia) ('kn', u'ಕನ್ನಡ'), # Kannada ('ko-kr', u'한국어(대한민국)'), # Korean (Korea) ('lt-lt', u'Lietuvių (Lietuva)'), # Lithuanian (Lithuania) ('ml', u'മലയാളം'), # Malayalam ('mn', u'Монгол хэл'), # Mongolian ('ms', u'Bahasa Melayu'), # Malay ('nb', u'Norsk bokmål'), # Norwegian Bokmål ('ne', u'नेपाली'), # Nepali ('nl-nl', u'Nederlands (Nederland)'), # Dutch (Netherlands) ('pl', u'Polski'), # Polish ('pt-br', u'Português (Brasil)'), # Portuguese (Brazil) ('pt-pt', u'Português (Portugal)'), # Portuguese (Portugal) ('ro', u'română'), # Romanian ('ru', u'Русский'), # Russian ('si', u'සිංහල'), # Sinhala ('sk', u'Slovenčina'), # Slovak ('sl', u'Slovenščina'), # Slovenian ('th', u'ไทย'), # Thai ('tr-tr', u'Türkçe 
(Türkiye)'), # Turkish (Turkey) ('uk', u'Українська'), # Ukranian ('ur', u'اردو'), # Urdu ('vi', u'Tiếng Việt'), # Vietnamese ('zh-cn', u'中文(简体)'), # Chinese (China) ('zh-tw', u'中文(台灣)'), # Chinese (Taiwan) ) LANGUAGE_DICT = dict(LANGUAGES) USE_I18N = True USE_L10N = True # Localization strings (e.g. django.po) are under this directory LOCALE_PATHS = (REPO_ROOT + '/conf/locale',) # edx-platform/conf/locale/ # Messages MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage' #################################### GITHUB ####################################### # gitreload is used in LMS-workflow to pull content from github # gitreload requests are only allowed from these IP addresses, which are # the advertised public IPs of the github WebHook servers. # These are listed, eg at https://github.com/edx/edx-platform/admin/hooks ALLOWED_GITRELOAD_IPS = ['207.97.227.253', '50.57.128.197', '108.171.174.178'] #################################### AWS ####################################### # S3BotoStorage insists on a timeout for uploaded assets. We should make it # permanent instead, but rather than trying to figure out exactly where that # setting is, I'm just bumping the expiration time to something absurd (100 # years). 
This is only used if DEFAULT_FILE_STORAGE is overriden to use S3 # in the global settings.py AWS_QUERYSTRING_EXPIRE = 10 * 365 * 24 * 60 * 60 # 10 years ################################# SIMPLEWIKI ################################### SIMPLE_WIKI_REQUIRE_LOGIN_EDIT = True SIMPLE_WIKI_REQUIRE_LOGIN_VIEW = False ################################# WIKI ################################### from course_wiki import settings as course_wiki_settings WIKI_ACCOUNT_HANDLING = False WIKI_EDITOR = 'course_wiki.editors.CodeMirror' WIKI_SHOW_MAX_CHILDREN = 0 # We don't use the little menu that shows children of an article in the breadcrumb WIKI_ANONYMOUS = False # Don't allow anonymous access until the styling is figured out WIKI_CAN_DELETE = course_wiki_settings.CAN_DELETE WIKI_CAN_MODERATE = course_wiki_settings.CAN_MODERATE WIKI_CAN_CHANGE_PERMISSIONS = course_wiki_settings.CAN_CHANGE_PERMISSIONS WIKI_CAN_ASSIGN = course_wiki_settings.CAN_ASSIGN WIKI_USE_BOOTSTRAP_SELECT_WIDGET = False WIKI_LINK_LIVE_LOOKUPS = False WIKI_LINK_DEFAULT_LEVEL = 2 ##### Feedback submission mechanism ##### FEEDBACK_SUBMISSION_EMAIL = None ##### Zendesk ##### ZENDESK_URL = None ZENDESK_USER = None ZENDESK_API_KEY = None ##### shoppingcart Payment ##### PAYMENT_SUPPORT_EMAIL = 'payment@example.com' ##### Using cybersource by default ##### CC_PROCESSOR = { 'CyberSource': { 'SHARED_SECRET': '', 'MERCHANT_ID': '', 'SERIAL_NUMBER': '', 'ORDERPAGE_VERSION': '7', 'PURCHASE_ENDPOINT': '', } } # Setting for PAID_COURSE_REGISTRATION, DOES NOT AFFECT VERIFIED STUDENTS PAID_COURSE_REGISTRATION_CURRENCY = ['usd', '$'] # Members of this group are allowed to generate payment reports PAYMENT_REPORT_GENERATOR_GROUP = 'shoppingcart_report_access' ################################# open ended grading config ##################### #By setting up the default settings with an incorrect user name and password, # will get an error when attempting to connect OPEN_ENDED_GRADING_INTERFACE = { 'url': 
'http://example.com/peer_grading', 'username': 'incorrect_user', 'password': 'incorrect_pass', 'staff_grading': 'staff_grading', 'peer_grading': 'peer_grading', 'grading_controller': 'grading_controller' } # Used for testing, debugging peer grading MOCK_PEER_GRADING = False # Used for testing, debugging staff grading MOCK_STAFF_GRADING = False ################################# Jasmine ################################### JASMINE_TEST_DIRECTORY = PROJECT_ROOT + '/static/coffee' ################################# Waffle ################################### # Name prepended to cookies set by Waffle WAFFLE_COOKIE = "waffle_flag_%s" # Two weeks (in sec) WAFFLE_MAX_AGE = 1209600 ################################# Middleware ################################### # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'staticfiles.finders.FileSystemFinder', 'staticfiles.finders.AppDirectoriesFinder', 'pipeline.finders.PipelineFinder', ) # List of callables that know how to import templates from various sources. 
TEMPLATE_LOADERS = ( 'edxmako.makoloader.MakoFilesystemLoader', 'edxmako.makoloader.MakoAppDirectoriesLoader', # 'django.template.loaders.filesystem.Loader', # 'django.template.loaders.app_directories.Loader', ) MIDDLEWARE_CLASSES = ( 'request_cache.middleware.RequestCache', 'microsite_configuration.middleware.MicrositeMiddleware', 'django_comment_client.middleware.AjaxExceptionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', # Instead of AuthenticationMiddleware, we use a cached backed version #'django.contrib.auth.middleware.AuthenticationMiddleware', 'cache_toolbox.middleware.CacheBackedAuthenticationMiddleware', 'student.middleware.UserStandingMiddleware', 'contentserver.middleware.StaticContentServer', 'crum.CurrentRequestUserMiddleware', # Adds user tags to tracking events # Must go before TrackMiddleware, to get the context set up 'user_api.middleware.UserTagsEventContextMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'track.middleware.TrackMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'splash.middleware.SplashMiddleware', # Allows us to dark-launch particular languages 'dark_lang.middleware.DarkLangMiddleware', 'embargo.middleware.EmbargoMiddleware', # Allows us to set user preferences # should be after DarkLangMiddleware 'lang_pref.middleware.LanguagePreferenceMiddleware', # Detects user-requested locale from 'accept-language' header in http request 'django.middleware.locale.LocaleMiddleware', 'django.middleware.transaction.TransactionMiddleware', # 'debug_toolbar.middleware.DebugToolbarMiddleware', 'django_comment_client.utils.ViewNameMiddleware', 'codejail.django_integration.ConfigureCodeJailMiddleware', # catches any uncaught RateLimitExceptions and returns a 403 instead of a 500 'ratelimitbackend.middleware.RateLimitMiddleware', # needs to run after locale middleware (or anything that modifies the request context) 'edxmako.middleware.MakoMiddleware', # For 
A/B testing 'waffle.middleware.WaffleMiddleware', # for expiring inactive sessions 'session_inactivity_timeout.middleware.SessionInactivityTimeout', # use Django built in clickjacking protection 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'course_wiki.middleware.WikiAccessMiddleware', ) # Clickjacking protection can be enabled by setting this to 'DENY' X_FRAME_OPTIONS = 'ALLOW' ############################### Pipeline ####################################### STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage' from rooted_paths import rooted_glob courseware_js = ( [ 'coffee/src/' + pth + '.js' for pth in ['courseware', 'histogram', 'navigation', 'time'] ] + sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/modules/**/*.js')) ) main_vendor_js = [ 'js/vendor/require.js', 'js/RequireJS-namespace-undefine.js', 'js/vendor/json2.js', 'js/vendor/jquery.min.js', 'js/vendor/jquery-ui.min.js', 'js/vendor/jquery.cookie.js', 'js/vendor/jquery.qtip.min.js', 'js/vendor/swfobject/swfobject.js', 'js/vendor/jquery.ba-bbq.min.js', 'js/vendor/ova/annotator-full.js', 'js/vendor/ova/annotator-full-firebase-auth.js', 'js/vendor/ova/video.dev.js', 'js/vendor/ova/vjs.youtube.js', 'js/vendor/ova/rangeslider.js', 'js/vendor/ova/share-annotator.js', 'js/vendor/ova/tinymce.min.js', 'js/vendor/ova/richText-annotator.js', 'js/vendor/ova/reply-annotator.js', 'js/vendor/ova/tags-annotator.js', 'js/vendor/ova/flagging-annotator.js', 'js/vendor/ova/jquery-Watch.js', 'js/vendor/ova/ova.js', 'js/vendor/ova/catch/js/catch.js', 'js/vendor/ova/catch/js/handlebars-1.1.2.js', 'js/vendor/URI.min.js', ] discussion_js = sorted(rooted_glob(COMMON_ROOT / 'static', 'coffee/src/discussion/**/*.js')) staff_grading_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/staff_grading/**/*.js')) open_ended_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/open_ended/**/*.js')) notes_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/notes/**/*.js')) 
instructor_dash_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/instructor_dashboard/**/*.js')) PIPELINE_CSS = { 'style-vendor': { 'source_filenames': [ 'css/vendor/font-awesome.css', 'css/vendor/jquery.qtip.min.css', 'css/vendor/responsive-carousel/responsive-carousel.css', 'css/vendor/responsive-carousel/responsive-carousel.slide.css', 'css/vendor/ova/edx-annotator.css', 'css/vendor/ova/annotator.css', 'css/vendor/ova/video-js.min.css', 'css/vendor/ova/rangeslider.css', 'css/vendor/ova/share-annotator.css', 'css/vendor/ova/richText-annotator.css', 'css/vendor/ova/tags-annotator.css', 'css/vendor/ova/flagging-annotator.css', 'css/vendor/ova/ova.css', 'js/vendor/ova/catch/css/main.css' ], 'output_filename': 'css/lms-style-vendor.css', }, 'style-vendor-tinymce-content': { 'source_filenames': [ 'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/content.min.css' ], 'output_filename': 'css/lms-style-vendor-tinymce-content.css', }, 'style-vendor-tinymce-skin': { 'source_filenames': [ 'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/skin.min.css' ], 'output_filename': 'css/lms-style-vendor-tinymce-skin.css', }, 'style-app': { 'source_filenames': [ 'sass/application.css', 'sass/ie.css' ], 'output_filename': 'css/lms-style-app.css', }, 'style-app-extend1': { 'source_filenames': [ 'sass/application-extend1.css', ], 'output_filename': 'css/lms-style-app-extend1.css', }, 'style-app-extend2': { 'source_filenames': [ 'sass/application-extend2.css', ], 'output_filename': 'css/lms-style-app-extend2.css', }, 'style-course-vendor': { 'source_filenames': [ 'js/vendor/CodeMirror/codemirror.css', 'css/vendor/jquery.treeview.css', 'css/vendor/ui-lightness/jquery-ui-1.8.22.custom.css', ], 'output_filename': 'css/lms-style-course-vendor.css', }, 'style-course': { 'source_filenames': [ 'sass/course.css', 'xmodule/modules.css', ], 'output_filename': 'css/lms-style-course.css', }, } common_js = set(rooted_glob(COMMON_ROOT / 'static', 'coffee/src/**/*.js')) - set(courseware_js + 
discussion_js + staff_grading_js + open_ended_js + notes_js + instructor_dash_js) project_js = set(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/**/*.js')) - set(courseware_js + discussion_js + staff_grading_js + open_ended_js + notes_js + instructor_dash_js) # test_order: Determines the position of this chunk of javascript on # the jasmine test page PIPELINE_JS = { 'application': { # Application will contain all paths not in courseware_only_js 'source_filenames': sorted(common_js) + sorted(project_js) + [ 'js/form.ext.js', 'js/my_courses_dropdown.js', 'js/toggle_login_modal.js', 'js/sticky_filter.js', 'js/query-params.js', 'js/src/utility.js', 'js/src/accessibility_tools.js', ], 'output_filename': 'js/lms-application.js', 'test_order': 1, }, 'courseware': { 'source_filenames': courseware_js, 'output_filename': 'js/lms-courseware.js', 'test_order': 2, }, 'main_vendor': { 'source_filenames': main_vendor_js, 'output_filename': 'js/lms-main_vendor.js', 'test_order': 0, }, 'module-descriptor-js': { 'source_filenames': rooted_glob(COMMON_ROOT / 'static/', 'xmodule/descriptors/js/*.js'), 'output_filename': 'js/lms-module-descriptors.js', 'test_order': 8, }, 'module-js': { 'source_filenames': rooted_glob(COMMON_ROOT / 'static', 'xmodule/modules/js/*.js'), 'output_filename': 'js/lms-modules.js', 'test_order': 3, }, 'discussion': { 'source_filenames': discussion_js, 'output_filename': 'js/discussion.js', 'test_order': 4, }, 'staff_grading': { 'source_filenames': staff_grading_js, 'output_filename': 'js/staff_grading.js', 'test_order': 5, }, 'open_ended': { 'source_filenames': open_ended_js, 'output_filename': 'js/open_ended.js', 'test_order': 6, }, 'notes': { 'source_filenames': notes_js, 'output_filename': 'js/notes.js', 'test_order': 7 }, 'instructor_dash': { 'source_filenames': instructor_dash_js, 'output_filename': 'js/instructor_dash.js', 'test_order': 9, }, } PIPELINE_DISABLE_WRAPPER = True # Compile all coffee files in course data directories if they are out of 
date. # TODO: Remove this once we move data into Mongo. This is only temporary while # course data directories are still in use. if os.path.isdir(DATA_DIR): for course_dir in os.listdir(DATA_DIR): js_dir = DATA_DIR / course_dir / "js" if not os.path.isdir(js_dir): continue for filename in os.listdir(js_dir): if filename.endswith('coffee'): new_filename = os.path.splitext(filename)[0] + ".js" if os.path.exists(js_dir / new_filename): coffee_timestamp = os.stat(js_dir / filename).st_mtime js_timestamp = os.stat(js_dir / new_filename).st_mtime if coffee_timestamp <= js_timestamp: continue os.system("rm %s" % (js_dir / new_filename)) os.system("coffee -c %s" % (js_dir / filename)) PIPELINE_CSS_COMPRESSOR = None PIPELINE_JS_COMPRESSOR = None STATICFILES_IGNORE_PATTERNS = ( "sass/*", "coffee/*", # Symlinks used by js-test-tool "xmodule_js", "common_static", ) PIPELINE_YUI_BINARY = 'yui-compressor' # Setting that will only affect the edX version of django-pipeline until our changes are merged upstream PIPELINE_COMPILE_INPLACE = True ################################# CELERY ###################################### # Message configuration CELERY_TASK_SERIALIZER = 'json' CELERY_RESULT_SERIALIZER = 'json' CELERY_MESSAGE_COMPRESSION = 'gzip' # Results configuration CELERY_IGNORE_RESULT = False CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True # Events configuration CELERY_TRACK_STARTED = True CELERY_SEND_EVENTS = True CELERY_SEND_TASK_SENT_EVENT = True # Exchange configuration CELERY_DEFAULT_EXCHANGE = 'edx.core' CELERY_DEFAULT_EXCHANGE_TYPE = 'direct' # Queues configuration HIGH_PRIORITY_QUEUE = 'edx.core.high' DEFAULT_PRIORITY_QUEUE = 'edx.core.default' LOW_PRIORITY_QUEUE = 'edx.core.low' HIGH_MEM_QUEUE = 'edx.core.high_mem' CELERY_QUEUE_HA_POLICY = 'all' CELERY_CREATE_MISSING_QUEUES = True CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE CELERY_QUEUES = { HIGH_PRIORITY_QUEUE: {}, LOW_PRIORITY_QUEUE: {}, DEFAULT_PRIORITY_QUEUE: {}, 
HIGH_MEM_QUEUE: {}, } # let logging work as configured: CELERYD_HIJACK_ROOT_LOGGER = False ################################ Bulk Email ################################### # Suffix used to construct 'from' email address for bulk emails. # A course-specific identifier is prepended. BULK_EMAIL_DEFAULT_FROM_EMAIL = 'no-reply@example.com' # Parameters for breaking down course enrollment into subtasks. BULK_EMAIL_EMAILS_PER_TASK = 100 BULK_EMAIL_EMAILS_PER_QUERY = 1000 # Initial delay used for retrying tasks. Additional retries use # longer delays. Value is in seconds. BULK_EMAIL_DEFAULT_RETRY_DELAY = 30 # Maximum number of retries per task for errors that are not related # to throttling. BULK_EMAIL_MAX_RETRIES = 5 # Maximum number of retries per task for errors that are related to # throttling. If this is not set, then there is no cap on such retries. BULK_EMAIL_INFINITE_RETRY_CAP = 1000 # We want Bulk Email running on the high-priority queue, so we define the # routing key that points to it. At the moment, the name is the same. BULK_EMAIL_ROUTING_KEY = HIGH_PRIORITY_QUEUE # Flag to indicate if individual email addresses should be logged as they are sent # a bulk email message. BULK_EMAIL_LOG_SENT_EMAILS = False # Delay in seconds to sleep between individual mail messages being sent, # when a bulk email task is retried for rate-related reasons. Choose this # value depending on the number of workers that might be sending email in # parallel, and what the SES rate is. BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS = 0.02 ############################## Video ########################################## YOUTUBE = { # YouTube JavaScript API 'API': 'www.youtube.com/iframe_api', # URL to test YouTube availability 'TEST_URL': 'gdata.youtube.com/feeds/api/videos/', # Current youtube api for requesting transcripts. # For example: http://video.google.com/timedtext?lang=en&v=j_jEn79vS3g. 
'TEXT_API': { 'url': 'video.google.com/timedtext', 'params': { 'lang': 'en', 'v': 'set_youtube_id_of_11_symbols_here', }, }, } ################################### APPS ###################################### INSTALLED_APPS = ( # Standard ones that are always installed... 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.humanize', 'django.contrib.messages', 'django.contrib.sessions', 'django.contrib.sites', 'djcelery', 'south', # Database-backed configuration 'config_models', # Monitor the status of services 'service_status', # For asset pipelining 'edxmako', 'pipeline', 'staticfiles', 'static_replace', # Our courseware 'circuit', 'courseware', 'student', 'static_template_view', 'staticbook', 'track', 'eventtracking.django', 'util', 'certificates', 'dashboard', 'instructor', 'instructor_task', 'open_ended_grading', 'psychometrics', 'licenses', 'course_groups', 'bulk_email', # External auth (OpenID, shib) 'external_auth', 'django_openid_auth', # For the wiki 'wiki', # The new django-wiki from benjaoming 'django_notify', 'course_wiki', # Our customizations 'mptt', 'sekizai', #'wiki.plugins.attachments', 'wiki.plugins.links', 'wiki.plugins.notifications', 'course_wiki.plugins.markdownedx', # Foldit integration 'foldit', # For A/B testing 'waffle', # For testing 'django.contrib.admin', # only used in DEBUG mode 'django_nose', 'debug', # Discussion forums 'django_comment_client', 'django_comment_common', 'notes', # Splash screen 'splash', # Monitoring 'datadog', # User API 'rest_framework', 'user_api', # Shopping cart 'shoppingcart', # Notification preferences setting 'notification_prefs', # Different Course Modes 'course_modes', # Student Identity Verification 'verify_student', # Dark-launching languages 'dark_lang', # Microsite configuration 'microsite_configuration', # Student Identity Reverification 'reverification', 'embargo', # Monitoring functionality 'monitoring', ) ######################### MARKETING SITE ############################### 
EDXMKTG_COOKIE_NAME = 'edxloggedin' MKTG_URLS = {} MKTG_URL_LINK_MAP = { 'ABOUT': 'about_edx', 'CONTACT': 'contact', 'FAQ': 'help_edx', 'COURSES': 'courses', 'ROOT': 'root', 'TOS': 'tos', 'HONOR': 'honor', 'PRIVACY': 'privacy_edx', 'JOBS': 'jobs', 'PRESS': 'press', # Verified Certificates 'WHAT_IS_VERIFIED_CERT': 'verified-certificate', } ################# Student Verification ################# VERIFY_STUDENT = { "DAYS_GOOD_FOR": 365, # How many days is a verficiation good for? } ### This enables the Metrics tab for the Instructor dashboard ########### FEATURES['CLASS_DASHBOARD'] = False if FEATURES.get('CLASS_DASHBOARD'): INSTALLED_APPS += ('class_dashboard',) ######################## CAS authentication ########################### if FEATURES.get('AUTH_USE_CAS'): CAS_SERVER_URL = 'https://provide_your_cas_url_here' AUTHENTICATION_BACKENDS = ( 'django.contrib.auth.backends.ModelBackend', 'django_cas.backends.CASBackend', ) INSTALLED_APPS += ('django_cas',) MIDDLEWARE_CLASSES += ('django_cas.middleware.CASMiddleware',) ###################### Registration ################################## # For each of the fields, give one of the following values: # - 'required': to display the field, and make it mandatory # - 'optional': to display the field, and make it non-mandatory # - 'hidden': to not display the field REGISTRATION_EXTRA_FIELDS = { 'level_of_education': 'optional', 'gender': 'optional', 'year_of_birth': 'optional', 'mailing_address': 'optional', 'goals': 'optional', 'honor_code': 'required', 'city': 'hidden', 'country': 'hidden', } ########################## CERTIFICATE NAME ######################## CERT_NAME_SHORT = "Certificate" CERT_NAME_LONG = "Certificate of Achievement" ###################### Grade Downloads ###################### GRADES_DOWNLOAD_ROUTING_KEY = HIGH_MEM_QUEUE GRADES_DOWNLOAD = { 'STORAGE_TYPE': 'localfs', 'BUCKET': 'edx-grades', 'ROOT_PATH': '/tmp/edx-s3/grades', } ######################## PROGRESS SUCCESS BUTTON 
############################## # The following fields are available in the URL: {course_id} {student_id} PROGRESS_SUCCESS_BUTTON_URL = 'http://<domain>/<path>/{course_id}' PROGRESS_SUCCESS_BUTTON_TEXT_OVERRIDE = None #### PASSWORD POLICY SETTINGS ##### PASSWORD_MIN_LENGTH = None PASSWORD_MAX_LENGTH = None PASSWORD_COMPLEXITY = {} PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = None PASSWORD_DICTIONARY = [] ##################### LinkedIn ##################### INSTALLED_APPS += ('django_openid_auth',) ############################ LinkedIn Integration ############################# INSTALLED_APPS += ('linkedin',) LINKEDIN_API = { 'EMAIL_WHITELIST': [], 'COMPANY_ID': '2746406', } ##### ACCOUNT LOCKOUT DEFAULT PARAMETERS ##### MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = 5 MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = 15 * 60 ##### LMS DEADLINE DISPLAY TIME_ZONE ####### TIME_ZONE_DISPLAYED_FOR_DEADLINES = 'UTC' # Source: # http://loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt according to http://en.wikipedia.org/wiki/ISO_639-1 ALL_LANGUAGES = ( [u"aa", u"Afar"], [u"ab", u"Abkhazian"], [u"af", u"Afrikaans"], [u"ak", u"Akan"], [u"sq", u"Albanian"], [u"am", u"Amharic"], [u"ar", u"Arabic"], [u"an", u"Aragonese"], [u"hy", u"Armenian"], [u"as", u"Assamese"], [u"av", u"Avaric"], [u"ae", u"Avestan"], [u"ay", u"Aymara"], [u"az", u"Azerbaijani"], [u"ba", u"Bashkir"], [u"bm", u"Bambara"], [u"eu", u"Basque"], [u"be", u"Belarusian"], [u"bn", u"Bengali"], [u"bh", u"Bihari languages"], [u"bi", u"Bislama"], [u"bs", u"Bosnian"], [u"br", u"Breton"], [u"bg", u"Bulgarian"], [u"my", u"Burmese"], [u"ca", u"Catalan"], [u"ch", u"Chamorro"], [u"ce", u"Chechen"], [u"zh", u"Chinese"], [u"cu", u"Church Slavic"], [u"cv", u"Chuvash"], [u"kw", u"Cornish"], [u"co", u"Corsican"], [u"cr", u"Cree"], [u"cs", u"Czech"], [u"da", u"Danish"], [u"dv", u"Divehi"], [u"nl", u"Dutch"], [u"dz", u"Dzongkha"], [u"en", u"English"], [u"eo", u"Esperanto"], [u"et", u"Estonian"], [u"ee", u"Ewe"], [u"fo", u"Faroese"], [u"fj", 
u"Fijian"], [u"fi", u"Finnish"], [u"fr", u"French"], [u"fy", u"Western Frisian"], [u"ff", u"Fulah"], [u"ka", u"Georgian"], [u"de", u"German"], [u"gd", u"Gaelic"], [u"ga", u"Irish"], [u"gl", u"Galician"], [u"gv", u"Manx"], [u"el", u"Greek"], [u"gn", u"Guarani"], [u"gu", u"Gujarati"], [u"ht", u"Haitian"], [u"ha", u"Hausa"], [u"he", u"Hebrew"], [u"hz", u"Herero"], [u"hi", u"Hindi"], [u"ho", u"Hiri Motu"], [u"hr", u"Croatian"], [u"hu", u"Hungarian"], [u"ig", u"Igbo"], [u"is", u"Icelandic"], [u"io", u"Ido"], [u"ii", u"Sichuan Yi"], [u"iu", u"Inuktitut"], [u"ie", u"Interlingue"], [u"ia", u"Interlingua"], [u"id", u"Indonesian"], [u"ik", u"Inupiaq"], [u"it", u"Italian"], [u"jv", u"Javanese"], [u"ja", u"Japanese"], [u"kl", u"Kalaallisut"], [u"kn", u"Kannada"], [u"ks", u"Kashmiri"], [u"kr", u"Kanuri"], [u"kk", u"Kazakh"], [u"km", u"Central Khmer"], [u"ki", u"Kikuyu"], [u"rw", u"Kinyarwanda"], [u"ky", u"Kirghiz"], [u"kv", u"Komi"], [u"kg", u"Kongo"], [u"ko", u"Korean"], [u"kj", u"Kuanyama"], [u"ku", u"Kurdish"], [u"lo", u"Lao"], [u"la", u"Latin"], [u"lv", u"Latvian"], [u"li", u"Limburgan"], [u"ln", u"Lingala"], [u"lt", u"Lithuanian"], [u"lb", u"Luxembourgish"], [u"lu", u"Luba-Katanga"], [u"lg", u"Ganda"], [u"mk", u"Macedonian"], [u"mh", u"Marshallese"], [u"ml", u"Malayalam"], [u"mi", u"Maori"], [u"mr", u"Marathi"], [u"ms", u"Malay"], [u"mg", u"Malagasy"], [u"mt", u"Maltese"], [u"mn", u"Mongolian"], [u"na", u"Nauru"], [u"nv", u"Navajo"], [u"nr", u"Ndebele, South"], [u"nd", u"Ndebele, North"], [u"ng", u"Ndonga"], [u"ne", u"Nepali"], [u"nn", u"Norwegian Nynorsk"], [u"nb", u"Bokmål, Norwegian"], [u"no", u"Norwegian"], [u"ny", u"Chichewa"], [u"oc", u"Occitan"], [u"oj", u"Ojibwa"], [u"or", u"Oriya"], [u"om", u"Oromo"], [u"os", u"Ossetian"], [u"pa", u"Panjabi"], [u"fa", u"Persian"], [u"pi", u"Pali"], [u"pl", u"Polish"], [u"pt", u"Portuguese"], [u"ps", u"Pushto"], [u"qu", u"Quechua"], [u"rm", u"Romansh"], [u"ro", u"Romanian"], [u"rn", u"Rundi"], [u"ru", u"Russian"], [u"sg", 
u"Sango"], [u"sa", u"Sanskrit"], [u"si", u"Sinhala"], [u"sk", u"Slovak"], [u"sl", u"Slovenian"], [u"se", u"Northern Sami"], [u"sm", u"Samoan"], [u"sn", u"Shona"], [u"sd", u"Sindhi"], [u"so", u"Somali"], [u"st", u"Sotho, Southern"], [u"es", u"Spanish"], [u"sc", u"Sardinian"], [u"sr", u"Serbian"], [u"ss", u"Swati"], [u"su", u"Sundanese"], [u"sw", u"Swahili"], [u"sv", u"Swedish"], [u"ty", u"Tahitian"], [u"ta", u"Tamil"], [u"tt", u"Tatar"], [u"te", u"Telugu"], [u"tg", u"Tajik"], [u"tl", u"Tagalog"], [u"th", u"Thai"], [u"bo", u"Tibetan"], [u"ti", u"Tigrinya"], [u"to", u"Tonga (Tonga Islands)"], [u"tn", u"Tswana"], [u"ts", u"Tsonga"], [u"tk", u"Turkmen"], [u"tr", u"Turkish"], [u"tw", u"Twi"], [u"ug", u"Uighur"], [u"uk", u"Ukrainian"], [u"ur", u"Urdu"], [u"uz", u"Uzbek"], [u"ve", u"Venda"], [u"vi", u"Vietnamese"], [u"vo", u"Volapük"], [u"cy", u"Welsh"], [u"wa", u"Walloon"], [u"wo", u"Wolof"], [u"xh", u"Xhosa"], [u"yi", u"Yiddish"], [u"yo", u"Yoruba"], [u"za", u"Zhuang"], [u"zu", u"Zulu"] ) ### Apps only installed in some instances OPTIONAL_APPS = ( 'edx_jsdraw', 'mentoring', # edx-ora2 'submissions', 'openassessment', 'openassessment.assessment', 'openassessment.workflow', 'openassessment.xblock' ) for app_name in OPTIONAL_APPS: # First attempt to only find the module rather than actually importing it, # to avoid circular references - only try to import if it can't be found # by find_module, which doesn't work with import hooks try: imp.find_module(app_name) except ImportError: try: __import__(app_name) except ImportError: continue INSTALLED_APPS += (app_name,) # Stub for third_party_auth options. # See common/djangoapps/third_party_auth/settings.py for configuration details. THIRD_PARTY_AUTH = {} ### ADVANCED_SECURITY_CONFIG # Empty by default ADVANCED_SECURITY_CONFIG = {}
agpl-3.0
mathjazz/pontoon
pontoon/db/tests/test_lookups.py
3
1775
""" Collation lookups allow a user to set text-collation to search queries. """ import pytest from pontoon.db import IContainsCollate # noqa from pontoon.base.models import Entity from pontoon.base.tests import EntityFactory @pytest.fixture @pytest.mark.django_db def collation_entities(): # Create a list of instances in order to filter them. EntityFactory.create_batch(10, string="qwertyuiop") @pytest.mark.django_db def test_empty_locale(collation_entities): """Lookup won't add an empty collation to a sql query.""" entities = Entity.objects.filter(string__icontains_collate=("qwertyuiop", "")) query_sql = entities.query.sql_with_params()[0] # Force evaluation of query on the real database. assert entities.count() == 10 assert "COLLATE" not in query_sql @pytest.mark.django_db def test_arguments_are_in_tuple(collation_entities): """Check if lookup validates missing collation properly.""" with pytest.raises(ValueError): Entity.objects.filter(string__icontains_collate="st") @pytest.mark.django_db def test_invalid_number_of_arguments(collation_entities): """Validate a number of arguments.""" with pytest.raises(ValueError): Entity.objects.filter(string__icontains_collate=("qwertyuiop", "a", "b")) @pytest.mark.django_db def test_collation_query(collation_entities): """Check if collate is applied to a given lookup.""" entities = Entity.objects.filter(string__icontains_collate=("qwertyuiop", "C")) query_sql = entities.query.sql_with_params()[0] # Force evaluation of query on the real database. assert entities.count() == 10 assert query_sql.endswith( 'WHERE UPPER("base_entity"."string"::text COLLATE "C") ' 'LIKE UPPER(%s COLLATE "C")' )
bsd-3-clause
chen0031/Dato-Core
src/unity/python/graphlab/test/test_sarray_sketch.py
13
11788
''' Copyright (C) 2015 Dato, Inc. All rights reserved. This software may be modified and distributed under the terms of the BSD license. See the DATO-PYTHON-LICENSE file for details. ''' # from nose import with_setup # -*- coding: utf-8 -*- from graphlab.data_structures.sarray import SArray import pandas as pd import numpy as np import unittest import random import copy import os import math import shutil import array import util import time import itertools ####################################################### # Metrics tracking tests are in test_usage_metrics.py # ####################################################### class SArraySketchTest(unittest.TestCase): def setUp(self): pass def __validate_sketch_result(self, sketch, sa, delta = 1E-7): df = pd.DataFrame(list(sa.dropna())) pds = pd.Series(list(sa.dropna())) if (sa.dtype() == int or sa.dtype() == float): if (len(sa) == 0): self.assertTrue(math.isnan(sketch.min())) self.assertTrue(math.isnan(sketch.min())) self.assertEquals(sketch.sum(), 0.0) self.assertEquals(sketch.mean(), 0.0) self.assertEquals(sketch.var(), 0.0) self.assertEquals(sketch.std(), 0.0) else: self.assertEquals(sketch.min(), sa.min()) self.assertEquals(sketch.max(), sa.max()) self.assertEquals(sketch.sum(), sa.sum()) self.assertAlmostEqual(sketch.mean(), sa.dropna().mean(), delta=delta) self.assertAlmostEqual(sketch.var(), sa.dropna().var(), delta=delta) self.assertAlmostEqual(sketch.std(), sa.dropna().std(), delta=delta) self.assertAlmostEqual(sketch.quantile(0.5), df.quantile(0.5)[0], delta=1) self.assertEqual(sketch.quantile(0), df.quantile(0)[0]) self.assertEqual(sketch.quantile(1), df.quantile(1)[0]) self.assertEqual(sketch.frequent_items(), SArray(pds).sketch_summary().frequent_items()) for item in pds.value_counts().index: self.assertEqual(sketch.frequency_count(item), pds.value_counts()[item]) self.assertAlmostEqual(sketch.num_unique(), len(sa.unique()), delta=3) else: with self.assertRaises(RuntimeError): sketch.quantile((0.5)) 
self.assertEqual(sketch.num_undefined(), sa.num_missing()) self.assertEqual(sketch.size(), len(sa)) self.assertEqual(sketch.sketch_ready(), True) self.assertEqual(sketch.num_elements_processed(), sketch.size()) def __validate_nested_sketch_result(self, sa): sketch = sa.sketch_summary() self.__validate_sketch_result(sketch, sa) # element length summary t = sketch.element_length_summary() len_sa = sa.dropna().item_length() self.__validate_sketch_result(t, len_sa) def test_sketch_int(self): int_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, None] sa = SArray(data=int_data) self.__validate_sketch_result(sa.sketch_summary(), sa) def test_sketch_float(self): int_data = [1.2, 3,.4, 6.789, None] sa = SArray(data=int_data) self.__validate_sketch_result(sa.sketch_summary(), sa) def test_vector_sketch(self): vector_data = [[], [1,2], [3], [4,5,6,7], [8,9,10], None] sa = SArray(data=vector_data) sketch = sa.sketch_summary(); self.__validate_sketch_result(sketch, sa) self.__validate_sketch_result(sketch.element_length_summary(), sa.dropna().item_length()) flattened = list(itertools.chain.from_iterable(list(sa.dropna()))) self.__validate_sketch_result(sketch.element_summary(), SArray(flattened)) fi = sketch.frequent_items() self.assertEqual(len(fi), 5) self.assertEqual((fi['[1 2]']), 1) self.assertEqual((fi['[4 5 6 7]']), 1) # sub sketch with one key s = sa.sketch_summary(sub_sketch_keys = 1).element_sub_sketch(1) expected = sa.vector_slice(1) self.__validate_sketch_result(s, expected) # sub sketch with multiple keys keys = [1,3] s = sa.sketch_summary(sub_sketch_keys = keys).element_sub_sketch(keys) self.assertEqual(len(s), len(keys)) for key in keys: self.assertTrue(s.has_key(key)) expected = sa.vector_slice(key) self.__validate_sketch_result(s[key], expected) indexes = range(0,10) s = sa.sketch_summary(sub_sketch_keys = indexes).element_sub_sketch() self.assertEqual(len(s), len(indexes)) def test_list_sketch(self): list_data = [[], [1,2],[1,2], ['a', 'a', 'a', 'b'], [ 1 ,1 , 2], None] 
sa = SArray(list_data) self.__validate_nested_sketch_result(sa) sketch = sa.sketch_summary(); self.assertEqual(sketch.num_unique(), 4) element_summary = sketch.element_summary() another_rep = list(itertools.chain.from_iterable(list(sa.dropna()))) self.__validate_sketch_result(element_summary, SArray(another_rep, str)) fi = sketch.frequent_items() self.assertEqual(len(fi), 4) self.assertEqual((fi['[1,2]']), 2) self.assertEqual((fi['["a","a","a","b"]']), 1) def test_dict_sketch_int_value(self): dict_data = [{}, {'a':1, 'b':2}, {'a':1, 'b':2}, {'a':3, 'c':1}, {'a': 1, 'b': 2, 'c': 3}, None] sa = SArray(data=dict_data) self.__validate_nested_sketch_result(sa) sketch = sa.sketch_summary() self.assertEqual(sketch.num_unique(), 4) fi = sketch.frequent_items() self.assertEqual(len(fi), 4) self.assertEqual((fi['{"a":1, "b":2}']), 2) self.assertEqual((fi['{"a":3, "c":1}']), 1) # Get dict key sketch key_summary = sketch.dict_key_summary() another_rep = list(itertools.chain.from_iterable(list(sa.dict_keys().dropna()))) self.__validate_sketch_result(key_summary, SArray(another_rep)) # Get dict value sketch value_summary = sketch.dict_value_summary() another_rep = list(itertools.chain.from_iterable(list(sa.dict_values().dropna()))) self.__validate_sketch_result(value_summary, SArray(another_rep)) # sub sketch with one key s = sa.sketch_summary(sub_sketch_keys ='a').element_sub_sketch('a') expected = sa.unpack(column_name_prefix="")['a'] self.__validate_sketch_result(s, expected) s = sa.sketch_summary(sub_sketch_keys ='Nonexist').element_sub_sketch('Nonexist') self.assertEqual(s.num_undefined(), len(sa)) # sub sketch with multiple keys keys = ['a', 'b'] s = sa.sketch_summary(sub_sketch_keys =keys).element_sub_sketch(keys) self.assertEqual(len(s), len(keys)) for key in keys: self.assertTrue(s.has_key(key)) expected = sa.unpack(column_name_prefix="")[key] self.__validate_sketch_result(s[key], expected) def test_dict_sketch_str_value(self): # Dict value sketch type should be auto 
inferred dict_data = [{'a':'b', 'b':'c'}, {'a':'b', 'b':'c'}, {'a':'d', 'b':'4'}, None] sa = SArray(data=dict_data) self.__validate_nested_sketch_result(sa) sketch = sa.sketch_summary() fi = sketch.frequent_items() self.assertEqual(len(fi), 2) self.assertEqual(fi['{"a":"b", "b":"c"}'], 2) self.assertEqual(fi['{"a":"d", "b":"4"}'], 1) # Get dict key sketch key_summary = sketch.dict_key_summary() another_rep = list(itertools.chain.from_iterable(list(sa.dict_keys().dropna()))) self.__validate_sketch_result(key_summary, SArray(another_rep)) # Get dict value sketch value_summary = sketch.dict_value_summary() another_rep = list(itertools.chain.from_iterable(list(sa.dict_values().dropna()))) self.__validate_sketch_result(value_summary, SArray(another_rep)) # sub sketch with one key s = sa.sketch_summary(sub_sketch_keys ='a').element_sub_sketch('a') expected = sa.unpack(column_name_prefix="")['a'] self.__validate_sketch_result(s, expected) s = sa.sketch_summary(sub_sketch_keys ='Nonexist').element_sub_sketch('Nonexist') self.assertEqual(s.num_undefined(), len(sa)) # sub sketch with multiple keys keys = ['a', 'b'] s = sa.sketch_summary(sub_sketch_keys =keys).element_sub_sketch(keys) self.assertEqual(len(s), len(keys)) for key in keys: self.assertTrue(s.has_key(key)) expected = sa.unpack(column_name_prefix="")[key] self.__validate_sketch_result(s[key], expected) # allow pass in empty keys, which will retrieve all keys s = sa.sketch_summary(sub_sketch_keys =keys).element_sub_sketch() self.assertEqual(len(s), len(keys)) for key in keys: self.assertTrue(s.has_key(key)) expected = sa.unpack(column_name_prefix="")[key] self.__validate_sketch_result(s[key], expected) def test_str_sketch(self): str_data = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", None] sa = SArray(data=str_data) sketch = sa.sketch_summary() with self.assertRaises(RuntimeError): sketch.min() with self.assertRaises(RuntimeError): sketch.max() with self.assertRaises(RuntimeError): sketch.sum() with 
self.assertRaises(RuntimeError): sketch.mean() with self.assertRaises(RuntimeError): sketch.var() with self.assertRaises(RuntimeError): sketch.std() self.assertAlmostEqual(sketch.num_unique(), 10, delta=3) self.assertEqual(sketch.num_undefined(), 1) self.assertEqual(sketch.size(), len(str_data)) with self.assertRaises(RuntimeError): sketch.quantile(0.5) self.assertEqual(sketch.frequency_count("1"), 1) self.assertEqual(sketch.frequency_count("2"), 1) t = sketch.frequent_items() self.assertEqual(len(t), 10) def test_empty_sketch(self): int_data = [] sa = SArray(data=int_data) sketch = sa.sketch_summary() self.assertTrue(math.isnan(sketch.min())) self.assertTrue(math.isnan(sketch.max())) self.assertEquals(sketch.sum(), 0) self.assertEqual(sketch.mean(), 0) self.assertEqual(sketch.var(), 0) self.assertEqual(sketch.std(), 0) self.assertEqual(sketch.num_unique(), 0) self.assertEqual(sketch.num_undefined(),0) self.assertEqual(sketch.size(), 0) with self.assertRaises(RuntimeError): sketch.quantile(0.5) t = sketch.frequent_items() self.assertEqual(len(t), 0) def test_background_sketch(self): dict_data = [{str(i):1} for i in range(1,10000)] sa = SArray(dict_data) s = sa.sketch_summary(background=True, sub_sketch_keys=[str(i ) for i in range(100,200)]) s.sketch_ready() # cannot check the actual value as it depends on the speed of processing t = s.element_sub_sketch([str(i) for i in range(100, 105)]) self.assertEqual(len(t), 5) def test_large_value_sketch(self): sa = SArray([1234567890 for i in range(100)]) sk = sa.sketch_summary(); self.__validate_sketch_result(sa.sketch_summary(), sa, 1E-5) def test_cancelation(self): sa = SArray(range(1,10000)) s = sa.sketch_summary(background=True) s.cancel() # this can be rather non-deterministic, so there is very little # real output validation that can be done...
agpl-3.0
PawarPawan/h2o-v3
py2/testdir_rapids/test_rapids_basic.py
21
10673
import unittest, random, sys, time sys.path.extend(['.','..','../..','py']) import h2o2 as h2o import h2o_browse as h2b, h2o_exec as h2e, h2o_import as h2i # ! is only needed if there is no indexing initList = [ # weird cases with lhs # can only index v if it already exists. is the lhs addition before or after the indexed assign # if before, what does it add to. If after, does it add to the full key? # must be before. '(= ([ (+ #5 ([ %v "null" "null")) {#0;#1;#2;#3;#4} #0) ([ %v (: #4 #8) #0))', '(= ([ (+ #5 ([ %v "null" #0)) {#0;#1;#2;#3;#4} #0) ([ %v (: #4 #8) #0))', # wrong row count? # '(= ([ (+ #5 ([ %v #0 "null")) {#0;#1;#2;#3;#4} #0) ([ %v (: #4 #8) #0))', # fails with exception # '(= ([ (+ #5 ([ %v #0 #0)) {#0;#1;#2;#3;#4} #0) ([ %v (: #4 #8) #0))', # weird cases with lhs # dont need a rhs but should still modify? # does this add 5 to v and then nothing? # why does this fail? # '([ (+ #5 ([ %v "null" "null")) #0)', # why does this fail? # '([ (+ #6 ([ %v "null" #0)) #0)', # '([ (+ #7 ([ %v #0)) #0))', '(not (* #2 #2))', '(= !a (not (* #2 #2)))', '(+ (* #2 #2) (* #5 #5))', '(* #1 (+ (* #2 #2) (* #5 #5)))', '(= !x (c {#1;#5;#8;#10;#33}))', '(= !x (c {(: #0 #5) }))', '(= !x (c {(: #5 #5) }))', # why is num_rows = -4 here? Will blow up if we use it? # '(= !x (c {(: #5 #0) }))', '(= !v (c {#1;#4567;(: #9 #90);(: #9 #45);#450})', '(= !v2 (+ %v %v))', # FIX! test with space after { and before } '(= !v (c {#1;#4567;(: #91234 #9000209);(: #9000210 #45001045);#45001085})', '(= !v (c {#1;#4567;(: #91234 #9000209);(: #9000210 #45001045);45001085})', # remember need %v to reference '(= !v (c {#1;#4567;(: #9 #90);(: #9 #45);#450})', '(= !v2 %v )', '(= !v2 (n %v %v))', '(= !v2 (N %v %v))', '(= !v2 (- %v %v))', '(= !v2 (+ %v %v))', '(= !v2 (sum (+ %v %v) %TRUE)', '(= !v2 (+ #1.0 (sum (+ %v %v) %TRUE))', # different dimensions? 
'(= !v3 (+ %v (sum (+ %v %v) %TRUE))', '(= !v3 (cbind %v %v %v %v))', # '(= !v3 (rbind %v %v %v %v))', # '(= !keys (ls))', # works # '(= !x #1)', # works # '(= !x (sum ([ %v "null" #0) %TRUE))', # works # '(= !x (sum ([ v "null" (: #0 #0)) %TRUE))', # bad v # '(= !x (xorsum ([ %v "null" #0) %TRUE))', # works # 'a', # AAIOBE # 'x', # AAIOBE # 'c', # AAIOBE # 'c(1)', # says 'c(1' is unimplemented # '(= #1)', # AAIOBE # '(= !x #1)', # works # 'x=c(1.3,0,1,2,3,4,5)', # says 'x=c(1.3,0,1,2,3,4,5' is unimplemented # 'x=c(1.3', # AAIOBE # '()', # Unimplemented on token '' # '(x)', # unimplemented on x # '(= !x)', # AAIOBE # '(= !x ())', # unimplemented # '(= !x #1)', # works # '(= !x #1 #2)', # works, answer is 1? # '(= !x (cbind (#1 #2) %TRUE))', # ClassCast exception # '(= !x (cbind (#1 #2)))', # ClassCast exception # '(= !x (cbind (#1)))', # ClassCast exception # '(= !x (cbind #1))', # ClassCast exception # '(= !x (seq (#1, #2)) )', # number format exception # '(= !x (seq (#1, #2)) )', # bad # '(= !x (seq #1, #2) )', # bad # '(= !x (seq (#1) )', # bad # '(= !x #1; = !x #2)', # no error but why answer is 1? # '(= !x #1) (=!x #2)', # no error but why answer is 1? # '{(= !x #1); (=!y %x)', # AAIOBE # '{(= !x #1)', # AAIOBE # '({(= !x #1); (= !y #1))', # AAIOBE # '(1)', # '((1))', # '(((1)))', '(#(1))', # why isn't this illegal? '(#1)', '((#1))', '(((#1)))', '(= !x #1)', '((= !x #1))', '(((= !x #1)))', # complains # '(= !x (#1 #2))', # '((= !x (#1 #2)))', # '(((= !x (#1 #2))))', # okay. not okay if comma separated. seems wrong '(= !x (+ #1 #2))', '((= !x (+ #1 #2)))', '(((= !x (+ #1 #2))))', # complains # '(= !x (+ #1 #2 #4))', # '((= !x (+ #1 #2 #4)))', # '(((= !x (+ #1 #2 #4))))', # okay. 
'(= !x + #1 #2)', '((= !x + #1 #2))', '(((= !x + #1 #2)))', # '(= x + #1 #2)', # fails # parens on binary operators '(= !x + #1 + #1 (+ #1 #1))', '= !x + #1 + #1 (+ #1 #1)', '= !x N #1 N #1 (N #1 #1)', '= !x n #1 n #1 (n #1 #1)', '= !x L #1 L #1 (L #1 #1)', '= !x l #1 l #1 (l #1 #1)', '= !x G #1 G #1 (G #1 #1)', '= !x g #1 g #1 (g #1 #1)', '= !x (* (* #1 #1) (* #1 #1))', '= !x * #1 * #1 (* #1 #1)', '= !x - #1 - #1 (- #1 #1)', '= !x ^ #1 ^ #1 (^ #1 #1)', '= !x / #1 / #1 (/ #1 #1)', '= !x ** #1 ** #1 (** #1 #1)', # FIX! what is modulo now # '= !x % #1 % #1 (% #1 #1)', # '= !x %/% #1 %/% #1 %/% #1 #1', # unimplemented # '= !x %% #1 %% #1 %% #1 #1', # unimplemented # '(= !x + _#1 + _#1 + _#1 _#1)', # unimplemented # '= !x _ + #1 + #1 (+ #1 _ #1)', # '= !x _ N #1 N #1 (N #1 _ #1)', # '= !x _ n #1 n #1 (n #1 _ #1)', # '= !x _ L #1 L #1 (L #1 _ #1)', # '= !x _ l #1 l #1 (l #1 _ #1)', # '= !x _ G #1 G #1 (G #1 _ #1)', # '= !x _ g #1 g #1 (g #1 _ #1)', # '= !x _ * #1 * #1 (* #1 _ #1)', # '= !x _ - #1 - #1 (- #1 _ #1)', # '= !x _ ^ #1 ^ #1 (^ #1 _ #1)', # '= !x _ / #1 / #1 (/ #1 _ #1)', # '= !x _ ** #1 ** #1 (** #1 _ #1)', # '= !x _ % #1 % #1 (% #1 _ #1)', # can have space between ( and function '= !x1 ( sum ([ %v "null" #0) %TRUE)', '= !x2 ( sum ([ %v "null" #0) %TRUE)', '= !x2a ( sum ([ %v "null" #0) %TRUE )', # can have space after ( '= !x3 ( sum ([ %v "null" #0) %TRUE )', '= !x3a ( sum ([ %v "null" #0) %TRUE )', '= !x3b ( sum ([ %v "null" #0 ) %TRUE )', '= !x4 ( sum ([ %v " null " #0 ) %TRUE )', # can have space after ( '(= !x3 ( sum ([ %v "null" #0) %TRUE ))', '(= !x3a ( sum ([ %v "null" #0) %TRUE ) )', '(= !x3b ( sum ([ %v "null" #0 ) %TRUE ) )', '((= !x4 ( sum ([ %v " null " #0 ) %TRUE )))', '(= !x3 ( max ([ %v "null" #0) %TRUE ))', '(= !x3a ( max ([ %v "null" #0) %TRUE ) )', '(= !x3b ( max ([ %v "null" #0 ) %TRUE ) )', '((= !x4 ( max ([ %v " null " #0 ) %TRUE )))', '(= !x3 ( min ([ %v "null" #0) %TRUE ))', '(= !x3a ( min ([ %v "null" #0) %TRUE ) )', '(= !x3b ( min ([ 
%v "null" #0 ) %TRUE ) )', '((= !x4 ( min ([ %v " null " #0 ) %TRUE )))', '(= !x3 ( min ([ %v "null" #0) %TRUE ))', '(= !x3 (+ (sum ([ %v "null" #0) %TRUE) (sum ([ %v "null" #0) %TRUE) )', '(= !x3 (+ (xorsum ([ %v "null" #0) %TRUE) (xorsum ([ %v "null" #0) %TRUE) )', # FIX! these should be like sum # '(= !x3 (+ (max ([ %v "null" #0) %TRUE) (max ([ %v "null" #0) %TRUE) )', # '(= !x3 (+ (min ([ %v "null" #0) %TRUE) (min ([ %v "null" #0) %TRUE) )', # '{ #1 #1 }', # '(= !x4 { #1 #1 })', # v[c(1,5,8,10,33),] # commas are illegal (var name?) # vectors can be strings or numbers only, not vars or keys # h2o objects can't be in a vector # c(1,2,3,4) # '= !x (sum %v )' # '(= !x (xorsum ([ %v "null" #0) %TRUE))', # works # 'cave=c(1.3,0,1,2,3,4,5)', # 'ma=c(2.3,0,1,2,3,4,5)', # 'r2.hex=c(3.3,0,1,2,3,4,5)', # 'r3.hex=c(4.3,0,1,2,3,4,5)', # 'r4.hex=c(5.3,0,1,2,3,4,5)', # 'r.hex=i.hex', ] exprList = [ "round(r.hex[,1],0)", "round(r.hex[,1],1)", "round(r.hex[,1],2)", # "signif(r.hex[,1],-1)", # "signif(r.hex[,1],0)", "signif(r.hex[,1],1)", "signif(r.hex[,1],2)", "signif(r.hex[,1],22)", "trunc(r.hex[,1])", "trunc(r.hex[,1])", "trunc(r.hex[,1])", "trunc(r.hex[,1])", ## Compute row and column sums for a matrix: # 'x <- cbind(x1 = 3, x2 = c(4:1, 2:5))', # 'dimnames(x)[[1]] <- letters[1:8]', # 'apply(x, 2, mean, trim = .2)', 'apply(x, 2, mean)', 'col.sums <- apply(x, 2, sum)', 'row.sums <- apply(x, 1, sum)', # 'rbind(cbind(x, Rtot = row.sums), Ctot = c(col.sums, sum(col.sums)))', # 'stopifnot( apply(x, 2, is.vector))', ## Sort the columns of a matrix # 'apply(x, 2, sort)', ##- function with extra args: # 'cave <- function(x, c1, c2) c(mean(x[c1]), mean(x[c2]))', # 'apply(x, 1, cave, c1 = "x1", c2 = c("x1","x2"))', # 'ma <- matrix(c(1:4, 1, 6:8), nrow = 2)', 'ma', # fails unimplemented # 'apply(ma, 1, table)', #--> a list of length 2 # 'apply(ma, 1, stats::quantile)', # 5 x n matrix with rownames #'stopifnot(dim(ma) == dim(apply(ma, 1:2, sum)))', ## Example with different lengths for 
each call # 'z <- array(1:24, dim = 2:4)', # 'zseq <- apply(z, 1:2, function(x) seq_len(max(x)))', # 'zseq', ## a 2 x 3 matrix # 'typeof(zseq)', ## list # 'dim(zseq)', ## 2 3 # zseq[1,]', # 'apply(z, 3, function(x) seq_len(max(x)))', # a list without a dim attribute ] class Basic(unittest.TestCase): def tearDown(self): h2o.check_sandbox_for_errors() @classmethod def setUpClass(cls): global SEED SEED = h2o.setup_random_seed() h2o.init(1, base_port=54333) @classmethod def tearDownClass(cls): h2o.tear_down_cloud() def test_rapids_basic(self): bucket = 'smalldata' csvPathname = 'iris/iris_wheader.csv' hexKey = 'v' parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='put', hex_key=hexKey) keys = [] for execExpr in initList: execResult, result = h2e.exec_expr(h2o.nodes[0], execExpr, resultKey=None, timeoutSecs=4) # rows might be zero! if execResult['num_rows'] or execResult['num_cols']: keys.append(execExpr) print "\nExpressions that created keys" for k in keys: print k # for execExpr in exprList: # h2e.exec_expr(execExpr=execExpr, resultKey=None, timeoutSecs=10) h2o.check_sandbox_for_errors() if __name__ == '__main__': h2o.unit_main()
apache-2.0
shravan-shandilya/web3.py
tests/utilities/test_abi_filtering_by_argument_name.py
2
1462
import pytest

from web3.utils.abi import (
    filter_by_argument_name,
)


def _make_function_abi(name, input_names):
    """Build a minimal non-constant function ABI entry with uint256 inputs."""
    return {
        "constant": False,
        "inputs": [{"name": arg, "type": "uint256"} for arg in input_names],
        "name": name,
        "outputs": [],
        "type": "function",
    }


# Four functions whose argument lists grow one name at a time:
# func_1(), func_2(a), func_3(a, b), func_4(a, b, c).
ABI = [
    _make_function_abi("func_1", []),
    _make_function_abi("func_2", ["a"]),
    _make_function_abi("func_3", ["a", "b"]),
    _make_function_abi("func_4", ["a", "b", "c"]),
]


@pytest.mark.parametrize(
    'argument_names,expected',
    (
        ([], ['func_1', 'func_2', 'func_3', 'func_4']),
        (['a'], ['func_2', 'func_3', 'func_4']),
        (['a', 'c'], ['func_4']),
        (['c'], ['func_4']),
        (['b'], ['func_3', 'func_4']),
    )
)
def test_filter_by_arguments_1(argument_names, expected):
    """The matches are exactly the functions whose inputs include every
    requested argument name (order-insensitive)."""
    matches = filter_by_argument_name(argument_names, ABI)
    assert {entry['name'] for entry in matches} == set(expected)
mit
atlassian/boto
boto/datapipeline/exceptions.py
235
1471
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # from boto.exception import JSONResponseError class PipelineDeletedException(JSONResponseError): pass class InvalidRequestException(JSONResponseError): pass class TaskNotFoundException(JSONResponseError): pass class PipelineNotFoundException(JSONResponseError): pass class InternalServiceError(JSONResponseError): pass
mit
synmnstr/flexx
flexx/webruntime/chromeapp.py
12
2788
""" Web runtime based on a chrome app In contrast to running in the chrome browser, this makes the app have more the look and feel of a desktop app. """ import os import sys from .common import DesktopRuntime # todo: icon, sizing, etc. def get_chrome_exe(): """ Get the path of the Chrome executable If the path could not be found, returns None. """ paths = [] # Collect possible locations if sys.platform.startswith('win'): paths.append("C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe") paths.append("C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe") paths.append(os.path.expanduser("~\\AppData\\Local\\Google\\Chrome\\chrome.exe")) paths.append(os.path.expanduser("~\\Local Settings\\Application Data\\Google\\Chrome\\chrome.exe")) # xp elif sys.platform.startswith('linux'): paths.append('/usr/bin/google-chrome-stable') paths.append('/usr/bin/google-chrome-beta') paths.append('/usr/bin/google-chrome-dev') elif sys.platform.startswith('darwin'): paths.append('/Applications/Chrome.app') # Try location until we find one that exists for path in paths: if os.path.isfile(path): return path else: return None def get_chromium_exe(): """ Get the path of the Chromium executable If the path could not be found, returns None. 
""" paths = [] # Collect possible locations if sys.platform.startswith('win'): paths.append("C:\\Program Files\\Chromium\\Application\\chrome.exe") paths.append("C:\\Program Files (x86)\\Chromium\\Application\\chrome.exe") paths.append(os.path.expanduser("~\\AppData\\Local\\Chromium\\chrome.exe")) paths.append(os.path.expanduser("~\\Local Settings\\Application Data\\Chromium\\chrome.exe")) # xp elif sys.platform.startswith('linux'): paths.append('/usr/bin/chromium') elif sys.platform.startswith('darwin'): paths.append('/Applications/Chromium.app') # Try location until we find one that exists for path in paths: if os.path.isfile(path): return path else: return None class ChromeAppRuntime(DesktopRuntime): """ Desktop runtime based on chrome app. Requires the Chrome or Chromium browser to be installed. Note: icon, sizing and title is not yet supported. """ def _launch(self): # Get chrome executable exe = get_chrome_exe() or get_chromium_exe() if exe is None: raise RuntimeError('Chrome or Chromium browser was not detected.') # Launch url url = self._kwargs['url'] self._start_subprocess([exe, '--incognito', '--app=%s' % url])
bsd-2-clause
perkinslr/pypyjs
website/js/pypy.js-0.2.0/lib/modules/_marshal.py
11
17225
"""Internal Python object serialization This module contains functions that can read and write Python values in a binary format. The format is specific to Python, but independent of machine architecture issues (e.g., you can write a Python value to a file on a PC, transport the file to a Sun, and read it back there). Details of the format may change between Python versions. """ # NOTE: This module is used in the Python3 interpreter, but also by # the "sandboxed" process. It must work for Python2 as well. import types try: intern except NameError: from sys import intern try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f TYPE_NULL = '0' TYPE_NONE = 'N' TYPE_FALSE = 'F' TYPE_TRUE = 'T' TYPE_STOPITER = 'S' TYPE_ELLIPSIS = '.' TYPE_INT = 'i' TYPE_INT64 = 'I' TYPE_FLOAT = 'f' TYPE_COMPLEX = 'x' TYPE_LONG = 'l' TYPE_STRING = 's' TYPE_INTERNED = 't' TYPE_STRINGREF= 'R' TYPE_TUPLE = '(' TYPE_LIST = '[' TYPE_DICT = '{' TYPE_CODE = 'c' TYPE_UNICODE = 'u' TYPE_UNKNOWN = '?' 
TYPE_SET = '<' TYPE_FROZENSET= '>' class _Marshaller: dispatch = {} def __init__(self, writefunc): self._write = writefunc def dump(self, x): try: self.dispatch[type(x)](self, x) except KeyError: for tp in type(x).mro(): func = self.dispatch.get(tp) if func: break else: raise ValueError("unmarshallable object") func(self, x) def w_long64(self, x): self.w_long(x) self.w_long(x>>32) def w_long(self, x): a = chr(x & 0xff) x >>= 8 b = chr(x & 0xff) x >>= 8 c = chr(x & 0xff) x >>= 8 d = chr(x & 0xff) self._write(a + b + c + d) def w_short(self, x): self._write(chr((x) & 0xff)) self._write(chr((x>> 8) & 0xff)) def dump_none(self, x): self._write(TYPE_NONE) dispatch[type(None)] = dump_none def dump_bool(self, x): if x: self._write(TYPE_TRUE) else: self._write(TYPE_FALSE) dispatch[bool] = dump_bool def dump_stopiter(self, x): if x is not StopIteration: raise ValueError("unmarshallable object") self._write(TYPE_STOPITER) dispatch[type(StopIteration)] = dump_stopiter def dump_ellipsis(self, x): self._write(TYPE_ELLIPSIS) try: dispatch[type(Ellipsis)] = dump_ellipsis except NameError: pass # In Python3, this function is not used; see dump_long() below. 
def dump_int(self, x): y = x>>31 if y and y != -1: self._write(TYPE_INT64) self.w_long64(x) else: self._write(TYPE_INT) self.w_long(x) dispatch[int] = dump_int def dump_long(self, x): self._write(TYPE_LONG) sign = 1 if x < 0: sign = -1 x = -x digits = [] while x: digits.append(x & 0x7FFF) x = x>>15 self.w_long(len(digits) * sign) for d in digits: self.w_short(d) try: long except NameError: dispatch[int] = dump_long else: dispatch[long] = dump_long def dump_float(self, x): write = self._write write(TYPE_FLOAT) s = repr(x) write(chr(len(s))) write(s) dispatch[float] = dump_float def dump_complex(self, x): write = self._write write(TYPE_COMPLEX) s = repr(x.real) write(chr(len(s))) write(s) s = repr(x.imag) write(chr(len(s))) write(s) try: dispatch[complex] = dump_complex except NameError: pass def dump_string(self, x): # XXX we can't check for interned strings, yet, # so we (for now) never create TYPE_INTERNED or TYPE_STRINGREF self._write(TYPE_STRING) self.w_long(len(x)) self._write(x) dispatch[bytes] = dump_string def dump_unicode(self, x): self._write(TYPE_UNICODE) s = x.encode('utf8') self.w_long(len(s)) self._write(s) try: unicode except NameError: dispatch[str] = dump_unicode else: dispatch[unicode] = dump_unicode def dump_tuple(self, x): self._write(TYPE_TUPLE) self.w_long(len(x)) for item in x: self.dump(item) dispatch[tuple] = dump_tuple def dump_list(self, x): self._write(TYPE_LIST) self.w_long(len(x)) for item in x: self.dump(item) dispatch[list] = dump_list def dump_dict(self, x): self._write(TYPE_DICT) for key, value in x.items(): self.dump(key) self.dump(value) self._write(TYPE_NULL) dispatch[dict] = dump_dict def dump_code(self, x): self._write(TYPE_CODE) self.w_long(x.co_argcount) self.w_long(x.co_nlocals) self.w_long(x.co_stacksize) self.w_long(x.co_flags) self.dump(x.co_code) self.dump(x.co_consts) self.dump(x.co_names) self.dump(x.co_varnames) self.dump(x.co_freevars) self.dump(x.co_cellvars) self.dump(x.co_filename) self.dump(x.co_name) 
self.w_long(x.co_firstlineno) self.dump(x.co_lnotab) try: dispatch[types.CodeType] = dump_code except NameError: pass def dump_set(self, x): self._write(TYPE_SET) self.w_long(len(x)) for each in x: self.dump(each) try: dispatch[set] = dump_set except NameError: pass def dump_frozenset(self, x): self._write(TYPE_FROZENSET) self.w_long(len(x)) for each in x: self.dump(each) try: dispatch[frozenset] = dump_frozenset except NameError: pass class _NULL: pass class _StringBuffer: def __init__(self, value): self.bufstr = value self.bufpos = 0 def read(self, n): pos = self.bufpos newpos = pos + n ret = self.bufstr[pos : newpos] self.bufpos = newpos return ret class _Unmarshaller: dispatch = {} def __init__(self, readfunc): self._read = readfunc self._stringtable = [] def load(self): c = self._read(1) if not c: raise EOFError try: return self.dispatch[c](self) except KeyError: raise ValueError("bad marshal code: %c (%d)" % (c, ord(c))) def r_short(self): lo = ord(self._read(1)) hi = ord(self._read(1)) x = lo | (hi<<8) if x & 0x8000: x = x - 0x10000 return x def r_long(self): s = self._read(4) a = ord(s[0]) b = ord(s[1]) c = ord(s[2]) d = ord(s[3]) x = a | (b<<8) | (c<<16) | (d<<24) if d & 0x80 and x > 0: x = -((1<<32) - x) return int(x) else: return x def r_long64(self): a = ord(self._read(1)) b = ord(self._read(1)) c = ord(self._read(1)) d = ord(self._read(1)) e = ord(self._read(1)) f = ord(self._read(1)) g = ord(self._read(1)) h = ord(self._read(1)) x = a | (b<<8) | (c<<16) | (d<<24) x = x | (e<<32) | (f<<40) | (g<<48) | (h<<56) if h & 0x80 and x > 0: x = -((1<<64) - x) return x def load_null(self): return _NULL dispatch[TYPE_NULL] = load_null def load_none(self): return None dispatch[TYPE_NONE] = load_none def load_true(self): return True dispatch[TYPE_TRUE] = load_true def load_false(self): return False dispatch[TYPE_FALSE] = load_false def load_stopiter(self): return StopIteration dispatch[TYPE_STOPITER] = load_stopiter def load_ellipsis(self): return Ellipsis 
dispatch[TYPE_ELLIPSIS] = load_ellipsis dispatch[TYPE_INT] = r_long dispatch[TYPE_INT64] = r_long64 def load_long(self): size = self.r_long() sign = 1 if size < 0: sign = -1 size = -size x = 0 for i in range(size): d = self.r_short() x = x | (d<<(i*15)) return x * sign dispatch[TYPE_LONG] = load_long def load_float(self): n = ord(self._read(1)) s = self._read(n) return float(s) dispatch[TYPE_FLOAT] = load_float def load_complex(self): n = ord(self._read(1)) s = self._read(n) real = float(s) n = ord(self._read(1)) s = self._read(n) imag = float(s) return complex(real, imag) dispatch[TYPE_COMPLEX] = load_complex def load_string(self): n = self.r_long() return self._read(n) dispatch[TYPE_STRING] = load_string def load_interned(self): n = self.r_long() ret = intern(self._read(n)) self._stringtable.append(ret) return ret dispatch[TYPE_INTERNED] = load_interned def load_stringref(self): n = self.r_long() return self._stringtable[n] dispatch[TYPE_STRINGREF] = load_stringref def load_unicode(self): n = self.r_long() s = self._read(n) ret = s.decode('utf8') return ret dispatch[TYPE_UNICODE] = load_unicode def load_tuple(self): return tuple(self.load_list()) dispatch[TYPE_TUPLE] = load_tuple def load_list(self): n = self.r_long() list = [self.load() for i in range(n)] return list dispatch[TYPE_LIST] = load_list def load_dict(self): d = {} while 1: key = self.load() if key is _NULL: break value = self.load() d[key] = value return d dispatch[TYPE_DICT] = load_dict def load_code(self): argcount = self.r_long() nlocals = self.r_long() stacksize = self.r_long() flags = self.r_long() code = self.load() consts = self.load() names = self.load() varnames = self.load() freevars = self.load() cellvars = self.load() filename = self.load() name = self.load() firstlineno = self.r_long() lnotab = self.load() return types.CodeType(argcount, nlocals, stacksize, flags, code, consts, names, varnames, filename, name, firstlineno, lnotab, freevars, cellvars) dispatch[TYPE_CODE] = load_code def 
load_set(self): n = self.r_long() args = [self.load() for i in range(n)] return set(args) dispatch[TYPE_SET] = load_set def load_frozenset(self): n = self.r_long() args = [self.load() for i in range(n)] return frozenset(args) dispatch[TYPE_FROZENSET] = load_frozenset # ________________________________________________________________ def _read(self, n): pos = self.bufpos newpos = pos + n if newpos > len(self.bufstr): raise EOFError ret = self.bufstr[pos : newpos] self.bufpos = newpos return ret def _read1(self): ret = self.bufstr[self.bufpos] self.bufpos += 1 return ret def _r_short(self): lo = ord(_read1(self)) hi = ord(_read1(self)) x = lo | (hi<<8) if x & 0x8000: x = x - 0x10000 return x def _r_long(self): # inlined this most common case p = self.bufpos s = self.bufstr a = ord(s[p]) b = ord(s[p+1]) c = ord(s[p+2]) d = ord(s[p+3]) self.bufpos += 4 x = a | (b<<8) | (c<<16) | (d<<24) if d & 0x80 and x > 0: x = -((1<<32) - x) return int(x) else: return x def _r_long64(self): a = ord(_read1(self)) b = ord(_read1(self)) c = ord(_read1(self)) d = ord(_read1(self)) e = ord(_read1(self)) f = ord(_read1(self)) g = ord(_read1(self)) h = ord(_read1(self)) x = a | (b<<8) | (c<<16) | (d<<24) x = x | (e<<32) | (f<<40) | (g<<48) | (h<<56) if h & 0x80 and x > 0: x = -((1<<64) - x) return x _load_dispatch = {} class _FastUnmarshaller: dispatch = {} def __init__(self, buffer): self.bufstr = buffer self.bufpos = 0 self._stringtable = [] def load(self): # make flow space happy c = '?' 
try: c = self.bufstr[self.bufpos] self.bufpos += 1 return _load_dispatch[c](self) except KeyError: raise ValueError("bad marshal code: %c (%d)" % (c, ord(c))) except IndexError: raise EOFError def load_null(self): return _NULL dispatch[TYPE_NULL] = load_null def load_none(self): return None dispatch[TYPE_NONE] = load_none def load_true(self): return True dispatch[TYPE_TRUE] = load_true def load_false(self): return False dispatch[TYPE_FALSE] = load_false def load_stopiter(self): return StopIteration dispatch[TYPE_STOPITER] = load_stopiter def load_ellipsis(self): return Ellipsis dispatch[TYPE_ELLIPSIS] = load_ellipsis def load_int(self): return _r_long(self) dispatch[TYPE_INT] = load_int def load_int64(self): return _r_long64(self) dispatch[TYPE_INT64] = load_int64 def load_long(self): size = _r_long(self) sign = 1 if size < 0: sign = -1 size = -size x = 0 for i in range(size): d = _r_short(self) x = x | (d<<(i*15)) return x * sign dispatch[TYPE_LONG] = load_long def load_float(self): n = ord(_read1(self)) s = _read(self, n) return float(s) dispatch[TYPE_FLOAT] = load_float def load_complex(self): n = ord(_read1(self)) s = _read(self, n) real = float(s) n = ord(_read1(self)) s = _read(self, n) imag = float(s) return complex(real, imag) dispatch[TYPE_COMPLEX] = load_complex def load_string(self): n = _r_long(self) return _read(self, n) dispatch[TYPE_STRING] = load_string def load_interned(self): n = _r_long(self) ret = intern(_read(self, n)) self._stringtable.append(ret) return ret dispatch[TYPE_INTERNED] = load_interned def load_stringref(self): n = _r_long(self) return self._stringtable[n] dispatch[TYPE_STRINGREF] = load_stringref def load_unicode(self): n = _r_long(self) s = _read(self, n) ret = s.decode('utf8') return ret dispatch[TYPE_UNICODE] = load_unicode def load_tuple(self): return tuple(self.load_list()) dispatch[TYPE_TUPLE] = load_tuple def load_list(self): n = _r_long(self) list = [] for i in range(n): list.append(self.load()) return list 
dispatch[TYPE_LIST] = load_list def load_dict(self): d = {} while 1: key = self.load() if key is _NULL: break value = self.load() d[key] = value return d dispatch[TYPE_DICT] = load_dict def load_code(self): argcount = _r_long(self) nlocals = _r_long(self) stacksize = _r_long(self) flags = _r_long(self) code = self.load() consts = self.load() names = self.load() varnames = self.load() freevars = self.load() cellvars = self.load() filename = self.load() name = self.load() firstlineno = _r_long(self) lnotab = self.load() return types.CodeType(argcount, nlocals, stacksize, flags, code, consts, names, varnames, filename, name, firstlineno, lnotab, freevars, cellvars) dispatch[TYPE_CODE] = load_code def load_set(self): n = _r_long(self) args = [self.load() for i in range(n)] return set(args) dispatch[TYPE_SET] = load_set def load_frozenset(self): n = _r_long(self) args = [self.load() for i in range(n)] return frozenset(args) dispatch[TYPE_FROZENSET] = load_frozenset _load_dispatch = _FastUnmarshaller.dispatch # _________________________________________________________________ # # user interface version = 1 @builtinify def dump(x, f, version=version): # XXX 'version' is ignored, we always dump in a version-0-compatible format m = _Marshaller(f.write) m.dump(x) @builtinify def load(f): um = _Unmarshaller(f.read) return um.load() @builtinify def dumps(x, version=version): # XXX 'version' is ignored, we always dump in a version-0-compatible format buffer = [] m = _Marshaller(buffer.append) m.dump(x) return ''.join(buffer) @builtinify def loads(s): um = _FastUnmarshaller(s) return um.load()
mit
ric2b/Vivaldi-browser
chromium/third_party/mako/examples/bench/basic.py
58
6913
# basic.py - basic benchmarks adapted from Genshi # Copyright (C) 2006 Edgewall Software # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # 3. The name of the author may not be used to endorse or promote # products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER # IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN # IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from cgi import escape import os try: from StringIO import StringIO except ImportError: from io import StringIO import sys import timeit def u(stringlit): if sys.version_info >= (3,): return stringlit else: return stringlit.decode('latin1') __all__ = ['mako', 'mako_inheritance', 'jinja2', 'jinja2_inheritance', 'cheetah', 'django', 'myghty', 'genshi', 'kid'] # Templates content and constants TITLE = 'Just a test' USER = 'joe' ITEMS = ['Number %d' % num for num in range(1, 15)] U_ITEMS = [u(item) for item in ITEMS] def genshi(dirname, verbose=False): from genshi.template import TemplateLoader loader = TemplateLoader([dirname], auto_reload=False) template = loader.load('template.html') def render(): data = dict(title=TITLE, user=USER, items=ITEMS) return template.generate(**data).render('xhtml') if verbose: print(render()) return render def myghty(dirname, verbose=False): from myghty import interp interpreter = interp.Interpreter(component_root=dirname) def render(): data = dict(title=TITLE, user=USER, items=ITEMS) buffer = StringIO() interpreter.execute("template.myt", request_args=data, out_buffer=buffer) return buffer.getvalue() if verbose: print(render()) return render def mako(dirname, verbose=False): from mako.template import Template from mako.lookup import TemplateLookup disable_unicode = (sys.version_info < (3,)) lookup = TemplateLookup(directories=[dirname], filesystem_checks=False, disable_unicode=disable_unicode) template = lookup.get_template('template.html') def render(): return template.render(title=TITLE, user=USER, list_items=U_ITEMS) if verbose: print(template.code + " " + render()) return render mako_inheritance = mako def jinja2(dirname, verbose=False): from jinja2 import Environment, FileSystemLoader env = Environment(loader=FileSystemLoader(dirname)) template = env.get_template('template.html') def render(): return template.render(title=TITLE, user=USER, list_items=U_ITEMS) if verbose: print(render()) return render jinja2_inheritance = jinja2 def 
cheetah(dirname, verbose=False): from Cheetah.Template import Template filename = os.path.join(dirname, 'template.tmpl') template = Template(file=filename) def render(): template.__dict__.update({'title': TITLE, 'user': USER, 'list_items': U_ITEMS}) return template.respond() if verbose: print(dir(template)) print(template.generatedModuleCode()) print(render()) return render def django(dirname, verbose=False): from django.conf import settings settings.configure(TEMPLATE_DIRS=[os.path.join(dirname, 'templates')]) from django import template, templatetags from django.template import loader templatetags.__path__.append(os.path.join(dirname, 'templatetags')) tmpl = loader.get_template('template.html') def render(): data = {'title': TITLE, 'user': USER, 'items': ITEMS} return tmpl.render(template.Context(data)) if verbose: print(render()) return render def kid(dirname, verbose=False): import kid kid.path = kid.TemplatePath([dirname]) template = kid.Template(file='template.kid') def render(): template = kid.Template(file='template.kid', title=TITLE, user=USER, items=ITEMS) return template.serialize(output='xhtml') if verbose: print(render()) return render def run(engines, number=2000, verbose=False): basepath = os.path.abspath(os.path.dirname(__file__)) for engine in engines: dirname = os.path.join(basepath, engine) if verbose: print('%s:' % engine.capitalize()) print('--------------------------------------------------------') else: sys.stdout.write('%s:' % engine.capitalize()) t = timeit.Timer(setup='from __main__ import %s; render = %s(r"%s", %s)' % (engine, engine, dirname, verbose), stmt='render()') time = t.timeit(number=number) / number if verbose: print('--------------------------------------------------------') print('%.2f ms' % (1000 * time)) if verbose: print('--------------------------------------------------------') if __name__ == '__main__': engines = [arg for arg in sys.argv[1:] if arg[0] != '-'] if not engines: engines = __all__ verbose = '-v' in sys.argv 
if '-p' in sys.argv: try: import hotshot, hotshot.stats prof = hotshot.Profile("template.prof") benchtime = prof.runcall(run, engines, number=100, verbose=verbose) stats = hotshot.stats.load("template.prof") except ImportError: import cProfile, pstats stmt = "run(%r, number=%r, verbose=%r)" % (engines, 1000, verbose) cProfile.runctx(stmt, globals(), {}, "template.prof") stats = pstats.Stats("template.prof") stats.strip_dirs() stats.sort_stats('time', 'calls') stats.print_stats() else: run(engines, verbose=verbose)
bsd-3-clause
netroby/percona-xtrabackup
storage/innobase/xtrabackup/test/python/iso8601/test_iso8601.py
92
3044
import iso8601 def test_iso8601_regex(): assert iso8601.ISO8601_REGEX.match("2006-10-11T00:14:33Z") def test_timezone_regex(): assert iso8601.TIMEZONE_REGEX.match("+01:00") assert iso8601.TIMEZONE_REGEX.match("+00:00") assert iso8601.TIMEZONE_REGEX.match("+01:20") assert iso8601.TIMEZONE_REGEX.match("-01:00") def test_parse_date(): d = iso8601.parse_date("2006-10-20T15:34:56Z") assert d.year == 2006 assert d.month == 10 assert d.day == 20 assert d.hour == 15 assert d.minute == 34 assert d.second == 56 assert d.tzinfo == iso8601.UTC def test_parse_date_fraction(): d = iso8601.parse_date("2006-10-20T15:34:56.123Z") assert d.year == 2006 assert d.month == 10 assert d.day == 20 assert d.hour == 15 assert d.minute == 34 assert d.second == 56 assert d.microsecond == 123000 assert d.tzinfo == iso8601.UTC def test_parse_date_fraction_2(): """From bug 6 """ d = iso8601.parse_date("2007-5-7T11:43:55.328Z'") assert d.year == 2007 assert d.month == 5 assert d.day == 7 assert d.hour == 11 assert d.minute == 43 assert d.second == 55 assert d.microsecond == 328000 assert d.tzinfo == iso8601.UTC def test_parse_date_tz(): d = iso8601.parse_date("2006-10-20T15:34:56.123+02:30") assert d.year == 2006 assert d.month == 10 assert d.day == 20 assert d.hour == 15 assert d.minute == 34 assert d.second == 56 assert d.microsecond == 123000 assert d.tzinfo.tzname(None) == "+02:30" offset = d.tzinfo.utcoffset(None) assert offset.days == 0 assert offset.seconds == 60 * 60 * 2.5 def test_parse_invalid_date(): try: iso8601.parse_date(None) except iso8601.ParseError: pass else: assert 1 == 2 def test_parse_invalid_date2(): try: iso8601.parse_date("23") except iso8601.ParseError: pass else: assert 1 == 2 def test_parse_no_timezone(): """issue 4 - Handle datetime string without timezone This tests what happens when you parse a date with no timezone. While not strictly correct this is quite common. I'll assume UTC for the time zone in this case. 
""" d = iso8601.parse_date("2007-01-01T08:00:00") assert d.year == 2007 assert d.month == 1 assert d.day == 1 assert d.hour == 8 assert d.minute == 0 assert d.second == 0 assert d.microsecond == 0 assert d.tzinfo == iso8601.UTC def test_parse_no_timezone_different_default(): tz = iso8601.FixedOffset(2, 0, "test offset") d = iso8601.parse_date("2007-01-01T08:00:00", default_timezone=tz) assert d.tzinfo == tz def test_space_separator(): """Handle a separator other than T """ d = iso8601.parse_date("2007-06-23 06:40:34.00Z") assert d.year == 2007 assert d.month == 6 assert d.day == 23 assert d.hour == 6 assert d.minute == 40 assert d.second == 34 assert d.microsecond == 0 assert d.tzinfo == iso8601.UTC
gpl-2.0
liorvh/raspberry_pwn
src/pentest/voiper/torturer/replay.py
8
12177
''' This file is part of VoIPER. VoIPER is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 2 of the License, or (at your option) any later version. VoIPER is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with VoIPER. If not, see <http://www.gnu.org/licenses/>. Copyright 2008, http://www.unprotectedhex.com Contact: nnp@unprotectedhex.com ''' import socket import os import re import sys from protocol_logic.sip_utilities import SIPCrashDetector class TortureMessage: def __init__(self, data, name, type, is_invite=False, invite_details=[]): ''' Represents a torture message @type data: String @param data: The data of the torture message @type name: String @param name: The name of the test case @type type: String @param name: Either 'valid' or 'invalid' @type is_invite: Boolean @param is_invite: Whether the message is an INVITE or not @type invite_details: List @param invite_details: The details from the INVITE necessary to cancel it ''' self.data = data self.name = name self.type = type self.response = "" self.is_invite = is_invite self.invite_details = invite_details def get_data(self): ''' Returns the data of the torture message @rtype: String @return: The data of the torture message ''' return self.data class Dispatcher: def __init__(self, host, port, messages, proto="udp", timeout="3.0", \ crash_detection=False): ''' Handles the dispatching of a group of SIP torture messages extracted from RFC 4475 @type host: String @param host: The host to send the test messages to @type port: Integer @param port: The port to send the test messages to @type messages: Dictionary @param messages: A dictionary containing lists of 
valid and invalid test messages @type proto: String @param proto: (Optional, def=udp) The protocol to encapsulate the test messages in @type timeout: Float @param timeout: (Optional, def=3.0) Timeout for all socket operations @type crash_detection: Boolean @param crash_detection: (Optional, def=False) Attempt to detect crashes using OPTIONS probes or not ''' self.host = host self.port = port self.proto = proto self.messages = messages self.timeout = timeout self.last_recv = "" self.crash_detection = crash_detection if self.crash_detection: self.crash_detector = SIPCrashDetector(self.host, self.port, \ timeout) def __target_responding(self): ''' Attempts to detect if the target application has crashed using the SIPCrashDetector class ''' return self.crash_detector.is_responding() def __send(self, torture_msg): ''' Send a torture message @type torture_msg: TortureMessage @param torture_msg: The torture message to be sent ''' data = torture_msg.get_data() if self.proto == "udp": sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.settimeout(self.timeout) if len(data) > 9216: print "Too much data for UDP, truncating to 9216 bytes" data = data[:9216] sock.sendto(data, (self.host, self.port)) try: self.last_recv = sock.recvfrom(4096)[0] except Exception, e: self.last_recv = "" print '[=] Response : ' + self.last_recv elif self.proto == "tcp": sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.settimeout(self.timeout) sock.connect((self.host, self.port)) total_sent = 0 while total_sent < len(data): sent = sock.send(data[totalSent:]) if sent == 0: raise RuntimeError("Error on socket.send()") total_sent += sent try: self.last_recv = sock.recv(4096) except Exception, e: self.last_recv = "" def dispatch(self, type="all"): ''' Dispatch all the test messages of which match the type @type type: String @param type: (Optional, def=all) The type of messages to send. 
Can be 'all', 'valid' or 'invalid' ''' valid_msgs = self.messages['valid'] invalid_msgs = self.messages['invalid'] if type == "all" or type == "valid": print '[+] Sending VALID messages' for msg in valid_msgs: print '[-] Sending ' + msg.name self.__send(msg) msg.response = self.last_recv if self.crash_detection: if not self.__target_responding(): print "Possible crash detected after test" + \ msg.name raw_input("Press any key to continue testing.....") if type == "all" or type == "invalid": print '[+] Sending INVALID messages' for msg in invalid_msgs: print '[-] Sending ' + msg.name self.__send(msg) msg.response = self.last_recv if self.crash_detection: if self.__detect_crash(): print "Possible crash detected after test" + \ msg.name raw_input("Press any key to continue testing.....") class Parser: def __init__(self, directory): ''' A parser for SIP test cases. Creates TortureMessage objects and stores them in a dictionary. @type directory: String @param directory: The directory in which to search for the SIP test cases ''' self.directory = directory self.messages = {'valid' : [], 'invalid' : [], } def parse(self): ''' Parses all the files in a given directory for valid/invalid SIP test cases. The files are identified as valid/invalid based on the filename. 
Files that are valid should have the extension '.valid' and invalid files the extension '.invalid' @rtype: Dictionary @return: A dictionary of valid and invalid TortureMessage @todo: Extract the REGISTER tests from RFC 4475 and save them along with the other tests ''' dirList = os.listdir(self.directory) for fname in dirList: if fname.find('.svn')!= -1: continue file = open(self.directory + '/' + fname, 'r') msg = self.__extract_packet(file) file.close() # detect INVITE messages so we can CANCEL them invite_msg = False invite_details = [] ''' Comment this stuff back in for cancelling sent INVITES if msg.find('INVITE') != -1: invite_msg = True invite_details = self.__extract_invite_details(msg) ''' if fname.find('.valid') != -1: torture_msg = TortureMessage(msg, fname, 'Valid', \ invite_msg, invite_details) self.messages['valid'].append(torture_msg) elif fname.find('.invalid') != -1: torture_msg = TortureMessage(msg, fname, 'Invalid', \ invite_msg, invite_details) self.messages['invalid'].append(torture_msg) return self.messages def __extract_invite_details(self, msg): ''' Parses out the details required to cancel an invite request @type msg: String @param msg: A SIP message @rtype: List @return: A list containing the details from the SIP message requires to cancel it @todo: Fix the regex's to work on the more screwed up test messages ''' try: call_id = re.search('Call-ID\s*:\s*([\d\w@\.]+)', msg, re.IGNORECASE).group(1) uri = re.search('INVITE\s+([\d\w@\.:;-]+)\s+SIP/\d\.0', msg, re.IGNORECASE).group(1) to = re.search('To\s*:[\s*|\n |\n ](.*)', msg, re.IGNORECASE).group(1) cseq_num = re.search('Cseq\s*:\s*(\d)+', msg, re.IGNORECASE).group(1) from_ = re.search('From\s*:\s*(.*)', msg, re.IGNORECASE).group(1) except IndexError, e: return None return [uri, call_id, to, cseq_num, from_] def __extract_packet(self, file): ''' Parses the data in the file according to a set of rules to create a packet that conforms with the intentions of RFC 4475 The tags <allOneLine>, 
<hex> and <repeat> are parsed in accordance with RFC 4475 @type file: File @param file: A file object containing a SIP message from RFC 4475 @rtype: String @return: The parsed SIP packet ''' # strip out any whitespace from the end of lines line_list = [line.rstrip() for line in file] packet = [] x = 0 while x < len(line_list): if line_list[x] == '<allOneLine>': y = x while line_list[y] != '</allOneLine>': y += 1 x += 1 line = ''.join(line_list[x: y]) x = y + 1 else: line = line_list[x] x += 1 # the order of these parsings is important ctr = 0 while line.find('<repeat') != -1: line = self.__parse_repeat(line) while line.find('<hex>') != -1: line = self.__parse_hex(line) packet.append(line) # end of packet == \r\n\r\n packet.append('\r\n') return '\r\n'.join(packet) def __parse_hex(self, line): ''' Parses a line that contains the <hex> tag @type line: String @param line: The line containing the <hex> tag @rtype: String @return: The line with the tags removed and the string between them replaced with the correct hex digits ''' x = 0 # save these for later pre = line[:line.find('<hex')] post = line[line.find('</hex>') + 6:] line = line[line.find('<hex>') + 5:line.find('</hex>')] new_line = [] while x < len(line): # get the next two hex digits tmp = ''.join(line[x:x+2]) escaped = ('\\x' + tmp).decode('string_escape') new_line.append(escaped) x += 2 return pre + ''.join(new_line) + post def __parse_repeat(self, line): ''' Parses a line that contains the <repeat> tag @type line: String @param line: The line containing the <repeat> tag @rtype: String @return: The line with the tags removed and the string between them replaced with repeated sequence 'count' number of times ''' # save these for later pre = line[:line.find('<repeat')] post = line[line.find('</repeat>') + 9:] m = re.search('<repeat count=(\d+)>(.*?)</repeat>', line) count = int(m.group(1)) text = m.group(2) return pre + text*count + post
gpl-3.0
computersalat/ansible
lib/ansible/plugins/httpapi/__init__.py
33
3146
# (c) 2018 Red Hat Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type from abc import abstractmethod from ansible.plugins import AnsiblePlugin class HttpApiBase(AnsiblePlugin): def __init__(self, connection): super(HttpApiBase, self).__init__() self.connection = connection self._become = False self._become_pass = '' def set_become(self, become_context): self._become = become_context.become self._become_pass = getattr(become_context, 'become_pass') or '' def login(self, username, password): """Call a defined login endpoint to receive an authentication token. This should only be implemented if the API has a single endpoint which can turn HTTP basic auth into a token which can be reused for the rest of the calls for the session. """ pass def logout(self): """ Call to implement session logout. Method to clear session gracefully e.g. tokens granted in login need to be revoked. """ pass def update_auth(self, response, response_text): """Return per-request auth token. The response should be a dictionary that can be plugged into the headers of a request. The default implementation uses cookie data. If no authentication data is found, return None """ cookie = response.info().get('Set-Cookie') if cookie: return {'Cookie': cookie} return None def handle_httperror(self, exc): """Overridable method for dealing with HTTP codes. This method will attempt to handle known cases of HTTP status codes. If your API uses status codes to convey information in a regular way, you can override this method to handle it appropriately. :returns: * True if the code has been handled in a way that the request may be resent without changes. * False if the error cannot be handled or recovered from by the plugin. This will result in the HTTPError being raised as an exception for the caller to deal with as appropriate (most likely by failing). 
* Any other value returned is taken as a valid response from the server without making another request. In many cases, this can just be the original exception. """ if exc.code == 401: if self.connection._auth: # Stored auth appears to be invalid, clear and retry self.connection._auth = None self.login(self.connection.get_option('remote_user'), self.connection.get_option('password')) return True else: # Unauthorized and there's no token. Return an error return False return exc @abstractmethod def send_request(self, data, **message_kwargs): """Prepares and sends request(s) to device.""" pass
gpl-3.0
neerajvashistha/pa-dude
lib/python2.7/site-packages/django/conf/__init__.py
135
7622
""" Settings and configuration for Django. Values will be read from the module specified by the DJANGO_SETTINGS_MODULE environment variable, and then from django.conf.global_settings; see the global settings file for a list of all possible variables. """ import importlib import os import time import warnings from django.conf import global_settings from django.core.exceptions import ImproperlyConfigured from django.utils.deprecation import RemovedInDjango110Warning from django.utils.functional import LazyObject, empty ENVIRONMENT_VARIABLE = "DJANGO_SETTINGS_MODULE" class LazySettings(LazyObject): """ A lazy proxy for either global Django settings or a custom settings object. The user can manually configure settings prior to using them. Otherwise, Django uses the settings module pointed to by DJANGO_SETTINGS_MODULE. """ def _setup(self, name=None): """ Load the settings module pointed to by the environment variable. This is used the first time we need any settings at all, if the user has not previously configured the settings manually. """ settings_module = os.environ.get(ENVIRONMENT_VARIABLE) if not settings_module: desc = ("setting %s" % name) if name else "settings" raise ImproperlyConfigured( "Requested %s, but settings are not configured. " "You must either define the environment variable %s " "or call settings.configure() before accessing settings." % (desc, ENVIRONMENT_VARIABLE)) self._wrapped = Settings(settings_module) def __repr__(self): # Hardcode the class name as otherwise it yields 'Settings'. if self._wrapped is empty: return '<LazySettings [Unevaluated]>' return '<LazySettings "%(settings_module)s">' % { 'settings_module': self._wrapped.SETTINGS_MODULE, } def __getattr__(self, name): if self._wrapped is empty: self._setup(name) return getattr(self._wrapped, name) def configure(self, default_settings=global_settings, **options): """ Called to manually configure the settings. 
The 'default_settings' parameter sets where to retrieve any unspecified values from (its argument must support attribute access (__getattr__)). """ if self._wrapped is not empty: raise RuntimeError('Settings already configured.') holder = UserSettingsHolder(default_settings) for name, value in options.items(): setattr(holder, name, value) self._wrapped = holder @property def configured(self): """ Returns True if the settings have already been configured. """ return self._wrapped is not empty class BaseSettings(object): """ Common logic for settings whether set by a module or by the user. """ def __setattr__(self, name, value): if name in ("MEDIA_URL", "STATIC_URL") and value and not value.endswith('/'): raise ImproperlyConfigured("If set, %s must end with a slash" % name) object.__setattr__(self, name, value) class Settings(BaseSettings): def __init__(self, settings_module): # update this dict from global settings (but only for ALL_CAPS settings) for setting in dir(global_settings): if setting.isupper(): setattr(self, setting, getattr(global_settings, setting)) # store the settings module in case someone later cares self.SETTINGS_MODULE = settings_module mod = importlib.import_module(self.SETTINGS_MODULE) tuple_settings = ( "ALLOWED_INCLUDE_ROOTS", "INSTALLED_APPS", "TEMPLATE_DIRS", "LOCALE_PATHS", ) self._explicit_settings = set() for setting in dir(mod): if setting.isupper(): setting_value = getattr(mod, setting) if (setting in tuple_settings and not isinstance(setting_value, (list, tuple))): raise ImproperlyConfigured("The %s setting must be a list or a tuple. " "Please fix your settings." 
% setting) setattr(self, setting, setting_value) self._explicit_settings.add(setting) if not self.SECRET_KEY: raise ImproperlyConfigured("The SECRET_KEY setting must not be empty.") if ('django.contrib.auth.middleware.AuthenticationMiddleware' in self.MIDDLEWARE_CLASSES and 'django.contrib.auth.middleware.SessionAuthenticationMiddleware' not in self.MIDDLEWARE_CLASSES): warnings.warn( "Session verification will become mandatory in Django 1.10. " "Please add 'django.contrib.auth.middleware.SessionAuthenticationMiddleware' " "to your MIDDLEWARE_CLASSES setting when you are ready to opt-in after " "reading the upgrade considerations in the 1.8 release notes.", RemovedInDjango110Warning ) if hasattr(time, 'tzset') and self.TIME_ZONE: # When we can, attempt to validate the timezone. If we can't find # this file, no check happens and it's harmless. zoneinfo_root = '/usr/share/zoneinfo' if (os.path.exists(zoneinfo_root) and not os.path.exists(os.path.join(zoneinfo_root, *(self.TIME_ZONE.split('/'))))): raise ValueError("Incorrect timezone setting: %s" % self.TIME_ZONE) # Move the time zone info into os.environ. See ticket #2315 for why # we don't do this unconditionally (breaks Windows). os.environ['TZ'] = self.TIME_ZONE time.tzset() def is_overridden(self, setting): return setting in self._explicit_settings def __repr__(self): return '<%(cls)s "%(settings_module)s">' % { 'cls': self.__class__.__name__, 'settings_module': self.SETTINGS_MODULE, } class UserSettingsHolder(BaseSettings): """ Holder for user configured settings. """ # SETTINGS_MODULE doesn't make much sense in the manually configured # (standalone) case. SETTINGS_MODULE = None def __init__(self, default_settings): """ Requests for configuration variables not in this class are satisfied from the module specified in default_settings (if possible). 
""" self.__dict__['_deleted'] = set() self.default_settings = default_settings def __getattr__(self, name): if name in self._deleted: raise AttributeError return getattr(self.default_settings, name) def __setattr__(self, name, value): self._deleted.discard(name) super(UserSettingsHolder, self).__setattr__(name, value) def __delattr__(self, name): self._deleted.add(name) if hasattr(self, name): super(UserSettingsHolder, self).__delattr__(name) def __dir__(self): return list(self.__dict__) + dir(self.default_settings) def is_overridden(self, setting): deleted = (setting in self._deleted) set_locally = (setting in self.__dict__) set_on_default = getattr(self.default_settings, 'is_overridden', lambda s: False)(setting) return (deleted or set_locally or set_on_default) def __repr__(self): return '<%(cls)s>' % { 'cls': self.__class__.__name__, } settings = LazySettings()
mit
pwns4cash/vivisect
envi/qt/memwrite.py
5
8919
'''
Qt GUI for the writemem CLI command: shows the original bytes at a memory
location, lets the user type replacement bytes in one of several encodings,
previews the patched bytes, and emits a write request on demand.

Python 2 / PyQt4 code (str() calls convert QString, 'hex' codec is the
Python 2 string codec).
'''
import string

import envi.memory as e_mem
import envi.memcanvas as e_canvas
import envi.memcanvas.renderers as e_render

from PyQt4 import QtGui, QtCore


class VQLineEdit(QtGui.QLineEdit):
    '''
    QLineEdit that additionally emits a signal on release of *every*
    keypress, so listeners can react to each edit as it is typed.
    '''
    keyReleased = QtCore.pyqtSignal(QtGui.QKeyEvent)

    def keyReleaseEvent(self, event):
        self.keyReleased.emit(event)
        QtGui.QLineEdit.keyReleaseEvent(self, event)


class MemNavWidget(QtGui.QWidget):
    '''
    Expression/size entry pair used to select a memory range.

    userChanged(expr, size) is emitted only for manual entry (enter
    pressed), never for programmatic updates via setValues().
    '''
    userChanged = QtCore.pyqtSignal(str, str)

    def __init__(self):
        QtGui.QWidget.__init__(self)

        self.expr_entry = QtGui.QLineEdit()
        self.esize_entry = QtGui.QLineEdit()

        hbox1 = QtGui.QHBoxLayout()
        hbox1.setMargin(2)
        hbox1.setSpacing(4)
        hbox1.addWidget(self.expr_entry)
        hbox1.addWidget(self.esize_entry)
        self.setLayout(hbox1)

        self.expr_entry.returnPressed.connect(self.emitUserChangedSignal)
        self.esize_entry.returnPressed.connect(self.emitUserChangedSignal)

    def emitUserChangedSignal(self):
        '''
        Emits signal when user manually enters new expressions in the
        expr or size field and presses enter.
        '''
        expr = str(self.expr_entry.text())
        size = str(self.esize_entry.text())
        self.userChanged.emit(expr, size)

    def setValues(self, expr, esize):
        '''
        Called externally to allow programmatic way to update the expr
        or size field.  Does not emit the changed signal.
        '''
        self.expr_entry.setText(expr)
        self.esize_entry.setText(esize)

    def getValues(self):
        '''Return the current (expr, size) entry strings.'''
        return str(self.expr_entry.text()), str(self.esize_entry.text())


class MemWriteWindow(QtGui.QWidget):
    '''
    gui for writemem cli command.
    '''
    renderRequest = QtCore.pyqtSignal(str, str)

    # button to write memory was clicked (va, bytez)
    writeToMemory = QtCore.pyqtSignal(str, str)

    def __init__(self, expr='', esize='', emu=None, parent=None):
        QtGui.QWidget.__init__(self, parent=parent)

        # input encodings offered in the combo box; 'regex' is accepted in
        # the UI but cannot be encoded (see encodeData).
        self.modes = ['ascii', 'hex', 'regex', 'utf-8', 'utf-16-le', 'utf-16-be']

        rend_orig = e_render.ByteRend()
        self.canvas_orig = e_canvas.StringMemoryCanvas(None)
        self.canvas_orig.addRenderer('bytes', rend_orig)

        rend_new = e_render.ByteRend()
        self.canvas_new = e_canvas.StringMemoryCanvas(None)
        self.canvas_new.addRenderer('bytes', rend_new)

        hbox1 = QtGui.QHBoxLayout()
        self.nav = MemNavWidget()
        self.nav.userChanged.connect(self.renderMemory)
        self.renderRequest.connect(self.nav.setValues)
        hbox1.addWidget(self.nav)

        hbox2 = QtGui.QHBoxLayout()
        self.hex_edit = QtGui.QPlainTextEdit()
        self.hex_edit.setWordWrapMode(QtGui.QTextOption.NoWrap)
        self.hex_edit.setReadOnly(True)
        font = QtGui.QFont('Courier')  # should use actual memcanvas
        self.hex_edit.setFont(font)
        hbox2.addWidget(self.hex_edit)

        vbox1 = QtGui.QVBoxLayout()
        vbox1.addLayout(hbox1)
        vbox1.addLayout(hbox2)

        gbox1 = QtGui.QGroupBox('Original Bytes')
        gbox1.setLayout(vbox1)

        hbox3 = QtGui.QHBoxLayout()
        mode_label = QtGui.QLabel('Input:')
        self.mode_combo = QtGui.QComboBox()
        self.mode_combo.addItems(self.modes)
        self.mode_combo.currentIndexChanged.connect(self.encodingChanged)
        hbox3.addWidget(mode_label)
        hbox3.addWidget(self.mode_combo, alignment=QtCore.Qt.AlignLeft)
        hbox3.addStretch(1)

        hbox4 = QtGui.QHBoxLayout()
        data_label = QtGui.QLabel('Bytes:')
        self.data_edit = VQLineEdit()
        self.data_edit.keyReleased.connect(self.keyReleasedSlot)
        hbox4.addWidget(data_label)
        hbox4.addWidget(self.data_edit)

        vbox2 = QtGui.QVBoxLayout()
        vbox2.addLayout(hbox3)
        vbox2.addLayout(hbox4)

        gbox2 = QtGui.QGroupBox('New Bytes')
        gbox2.setLayout(vbox2)

        hbox5 = QtGui.QHBoxLayout()
        self.hex_preview = QtGui.QPlainTextEdit()
        self.hex_preview.setWordWrapMode(QtGui.QTextOption.NoWrap)
        self.hex_preview.setReadOnly(True)
        self.hex_preview.setFont(font)
        hbox5.addWidget(self.hex_preview)

        vbox3 = QtGui.QVBoxLayout()
        vbox3.addLayout(hbox5)

        gbox3 = QtGui.QGroupBox('Result Preview')
        gbox3.setLayout(vbox3)

        hbox6 = QtGui.QHBoxLayout()
        button = QtGui.QPushButton('Write Memory')
        button.clicked.connect(self.buttonClicked)
        hbox6.addWidget(button)

        vbox = QtGui.QVBoxLayout()
        vbox.addWidget(gbox1)
        vbox.addWidget(gbox2)
        vbox.addWidget(gbox3)
        vbox.addLayout(hbox6)

        self.setLayout(vbox)
        self.setWindowTitle('Memory Write')
        self.resize(650, 500)

        self.data_edit.setFocus()

        self.emu = emu
        self.renderMemory(expr, esize)

    def renderMemory(self, expr=None, esize=None, emu=None):
        '''
        Re-read memory at expr/esize, splice in the user's encoded bytes
        and refresh both hex views.  Any missing argument falls back to
        the current nav-widget values.
        '''
        if emu is not None:
            self.emu = emu

        curexpr, cur_esize = self.nav.getValues()
        if expr is None:
            expr = curexpr
        if esize is None:
            esize = cur_esize

        self.renderRequest.emit(expr, esize)

        try:
            # str() for QString -> ascii strings
            va = self.emu.parseExpression(str(expr))
            size = self.emu.parseExpression(str(esize))
            bytez = self.emu.readMemory(va, size)
            self.updateHexOrig(va, bytez)

            encoding = str(self.mode_combo.currentText())
            rbytes = str(self.data_edit.text())
            erbytes = self.encodeData(rbytes, encoding)
            if erbytes is None:
                # fix: encodeData returns None for regex mode or invalid
                # hex; previously this fell through to len(None) and the
                # preview showed a raw TypeError message.
                self.hex_preview.setPlainText(
                    'cannot encode input using %r' % encoding)
                return

            # encoded bytes is bigger than the amount we are displaying.
            if len(erbytes) > len(bytez):
                self.hex_preview.setPlainText(
                    'too many bytes, change size, encoding or input')
                return

            bytez = erbytes + bytez[len(erbytes):]
            self.updateHexPreview(va, bytez)
        except Exception as e:
            self.hex_preview.setPlainText(str(e))

    def keyReleasedSlot(self, event):
        # every keystroke re-validates and re-renders the preview.
        # (fix: dropped an unused local that read the combo box here)
        self.encodingChanged(None)

    def encodingChanged(self, idx):
        '''
        Install/remove the hex input validator to match the selected
        encoding, then refresh the preview.
        '''
        encoding = str(self.mode_combo.currentText())
        validator = None
        if encoding == 'hex':
            # only clear the box if there are non-hex chars
            # before setting the validator.
            txt = str(self.data_edit.text())
            if not all(c in string.hexdigits for c in txt):
                self.data_edit.setText('')

            regex = QtCore.QRegExp('^[0-9A-Fa-f]+$')
            validator = QtGui.QRegExpValidator(regex)

        self.data_edit.setValidator(validator)
        self.renderMemory()

    def encodeData(self, txt, encoding):
        '''
        Encode the user's text to raw bytes for the given encoding.
        Returns None when the text cannot be encoded (regex mode, or
        non-hex characters in hex mode).
        '''
        if encoding == 'hex' and (len(txt) % 2) != 0:
            txt = txt[:-1]  # trim last if odd length

        if encoding == 'hex':
            if not all(c in string.hexdigits for c in txt):
                return None
            # Python 2 'hex' string codec: "41" -> "A"
            return txt.decode(encoding)
        elif encoding == 'regex':
            return None

        return txt.encode(encoding)

    def updateHexOrig(self, va, bytez):
        '''Render the unmodified bytes into the top hex view.'''
        if bytez is None:
            self.hex_edit.setPlainText('')
            return

        self.canvas_orig.clearCanvas()
        mem = e_mem.MemoryObject()
        mem.addMemoryMap(va, e_mem.MM_READ, '', bytez)
        self.canvas_orig.mem = mem
        self.canvas_orig.renderMemory(va, len(bytez))
        self.hex_edit.setPlainText(str(self.canvas_orig))

    def updateHexPreview(self, va, bytez):
        '''Render the patched bytes into the preview hex view.'''
        if bytez is None:
            self.hex_preview.setPlainText('')
            return

        self.canvas_new.clearCanvas()
        mem = e_mem.MemoryObject()
        mem.addMemoryMap(va, e_mem.MM_READ, '', bytez)
        self.canvas_new.mem = mem
        self.canvas_new.renderMemory(va, len(bytez))
        self.hex_preview.setPlainText(str(self.canvas_new))

    def buttonClicked(self):
        '''
        Emit writeToMemory(expr, hexbytes) for the current input.
        '''
        curexpr, cur_esize = self.nav.getValues()
        encoding = str(self.mode_combo.currentText())
        rbytes = str(self.data_edit.text())
        erbytes = self.encodeData(rbytes, encoding)
        if erbytes is None:
            # fix: previously erbytes.encode('hex') raised AttributeError
            # on None (regex mode or invalid hex input); refuse instead.
            return
        hexbytes = erbytes.encode('hex')
        self.writeToMemory.emit(curexpr, hexbytes)

    def getValues(self):
        return self.nav.getValues()

    def setValues(self, expr, esize):
        self.nav.setValues(expr, esize)


class MockEmu(object):
    '''Stand-in emulator for manual testing of the window.'''

    def parseExpression(self, expr):
        # NOTE: eval() on the entered expression — acceptable only because
        # this mock is test scaffolding; never use on untrusted input.
        return long(eval(expr, {}, {}))

    def readMemory(self, va, size):
        return '\x90' * size


def main():
    import sys
    app = QtGui.QApplication([])
    w = MemWriteWindow('0x1234', '0xff', emu=MockEmu())
    w.show()
    sys.exit(app.exec_())

if __name__ == '__main__':
    main()
apache-2.0
wfxiang08/jieba
jieba/posseg/__init__.py
57
9232
from __future__ import absolute_import, unicode_literals import os import re import sys import jieba import marshal from .._compat import * from .viterbi import viterbi PROB_START_P = "prob_start.p" PROB_TRANS_P = "prob_trans.p" PROB_EMIT_P = "prob_emit.p" CHAR_STATE_TAB_P = "char_state_tab.p" re_han_detail = re.compile("([\u4E00-\u9FA5]+)") re_skip_detail = re.compile("([\.0-9]+|[a-zA-Z0-9]+)") re_han_internal = re.compile("([\u4E00-\u9FA5a-zA-Z0-9+#&\._]+)") re_skip_internal = re.compile("(\r\n|\s)") re_eng = re.compile("[a-zA-Z0-9]+") re_num = re.compile("[\.0-9]+") re_eng1 = re.compile('^[a-zA-Z0-9]$', re.U) def load_model(f_name): _curpath = os.path.normpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) # For Jython start_p = {} abs_path = os.path.join(_curpath, PROB_START_P) with open(abs_path, 'rb') as f: start_p = marshal.load(f) trans_p = {} abs_path = os.path.join(_curpath, PROB_TRANS_P) with open(abs_path, 'rb') as f: trans_p = marshal.load(f) emit_p = {} abs_path = os.path.join(_curpath, PROB_EMIT_P) with open(abs_path, 'rb') as f: emit_p = marshal.load(f) state = {} abs_path = os.path.join(_curpath, CHAR_STATE_TAB_P) with open(abs_path, 'rb') as f: state = marshal.load(f) f.closed return state, start_p, trans_p, emit_p, result if sys.platform.startswith("java"): char_state_tab_P, start_P, trans_P, emit_P, word_tag_tab = load_model() else: from .char_state_tab import P as char_state_tab_P from .prob_start import P as start_P from .prob_trans import P as trans_P from .prob_emit import P as emit_P class pair(object): def __init__(self, word, flag): self.word = word self.flag = flag def __unicode__(self): return '%s/%s' % (self.word, self.flag) def __repr__(self): return 'pair(%r, %r)' % (self.word, self.flag) def __str__(self): if PY2: return self.__unicode__().encode(default_encoding) else: return self.__unicode__() def __iter__(self): return iter((self.word, self.flag)) def encode(self, arg): return self.__unicode__().encode(arg) class 
POSTokenizer(object): def __init__(self, tokenizer=None): self.tokenizer = tokenizer or jieba.Tokenizer() self.load_word_tag(self.tokenizer.get_abs_path_dict()) def __repr__(self): return '<POSTokenizer tokenizer=%r>' % self.tokenizer def __getattr__(self, name): if name in ('cut_for_search', 'lcut_for_search', 'tokenize'): # may be possible? raise NotImplementedError return getattr(self.tokenizer, name) def initialize(self, dictionary=None): self.tokenizer.initialize(dictionary) self.load_word_tag(self.tokenizer.get_abs_path_dict()) def load_word_tag(self, f_name): self.word_tag_tab = {} with open(f_name, "rb") as f: for lineno, line in enumerate(f, 1): try: line = line.strip().decode("utf-8") if not line: continue word, _, tag = line.split(" ") self.word_tag_tab[word] = tag except Exception: raise ValueError( 'invalid POS dictionary entry in %s at Line %s: %s' % (f_name, lineno, line)) def makesure_userdict_loaded(self): if self.tokenizer.user_word_tag_tab: self.word_tag_tab.update(self.tokenizer.user_word_tag_tab) self.tokenizer.user_word_tag_tab = {} def __cut(self, sentence): prob, pos_list = viterbi( sentence, char_state_tab_P, start_P, trans_P, emit_P) begin, nexti = 0, 0 for i, char in enumerate(sentence): pos = pos_list[i][0] if pos == 'B': begin = i elif pos == 'E': yield pair(sentence[begin:i + 1], pos_list[i][1]) nexti = i + 1 elif pos == 'S': yield pair(char, pos_list[i][1]) nexti = i + 1 if nexti < len(sentence): yield pair(sentence[nexti:], pos_list[nexti][1]) def __cut_detail(self, sentence): blocks = re_han_detail.split(sentence) for blk in blocks: if re_han_detail.match(blk): for word in self.__cut(blk): yield word else: tmp = re_skip_detail.split(blk) for x in tmp: if x: if re_num.match(x): yield pair(x, 'm') elif re_eng.match(x): yield pair(x, 'eng') else: yield pair(x, 'x') def __cut_DAG_NO_HMM(self, sentence): DAG = self.tokenizer.get_DAG(sentence) route = {} self.tokenizer.calc(sentence, DAG, route) x = 0 N = len(sentence) buf = '' while x < 
N: y = route[x][1] + 1 l_word = sentence[x:y] if re_eng1.match(l_word): buf += l_word x = y else: if buf: yield pair(buf, 'eng') buf = '' yield pair(l_word, self.word_tag_tab.get(l_word, 'x')) x = y if buf: yield pair(buf, 'eng') buf = '' def __cut_DAG(self, sentence): DAG = self.tokenizer.get_DAG(sentence) route = {} self.tokenizer.calc(sentence, DAG, route) x = 0 buf = '' N = len(sentence) while x < N: y = route[x][1] + 1 l_word = sentence[x:y] if y - x == 1: buf += l_word else: if buf: if len(buf) == 1: yield pair(buf, self.word_tag_tab.get(buf, 'x')) elif not self.tokenizer.FREQ.get(buf): recognized = self.__cut_detail(buf) for t in recognized: yield t else: for elem in buf: yield pair(elem, self.word_tag_tab.get(elem, 'x')) buf = '' yield pair(l_word, self.word_tag_tab.get(l_word, 'x')) x = y if buf: if len(buf) == 1: yield pair(buf, self.word_tag_tab.get(buf, 'x')) elif not self.tokenizer.FREQ.get(buf): recognized = self.__cut_detail(buf) for t in recognized: yield t else: for elem in buf: yield pair(elem, self.word_tag_tab.get(elem, 'x')) def __cut_internal(self, sentence, HMM=True): self.makesure_userdict_loaded() sentence = strdecode(sentence) blocks = re_han_internal.split(sentence) if HMM: cut_blk = self.__cut_DAG else: cut_blk = self.__cut_DAG_NO_HMM for blk in blocks: if re_han_internal.match(blk): for word in cut_blk(blk): yield word else: tmp = re_skip_internal.split(blk) for x in tmp: if re_skip_internal.match(x): yield pair(x, 'x') else: for xx in x: if re_num.match(xx): yield pair(xx, 'm') elif re_eng.match(x): yield pair(xx, 'eng') else: yield pair(xx, 'x') def _lcut_internal(self, sentence): return list(self.__cut_internal(sentence)) def _lcut_internal_no_hmm(self, sentence): return list(self.__cut_internal(sentence, False)) def cut(self, sentence, HMM=True): for w in self.__cut_internal(sentence, HMM=HMM): yield w def lcut(self, *args, **kwargs): return list(self.cut(*args, **kwargs)) # default Tokenizer instance dt = POSTokenizer(jieba.dt) # 
global functions initialize = dt.initialize def _lcut_internal(s): return dt._lcut_internal(s) def _lcut_internal_no_hmm(s): return dt._lcut_internal_no_hmm(s) def cut(sentence, HMM=True): """ Global `cut` function that supports parallel processing. Note that this only works using dt, custom POSTokenizer instances are not supported. """ global dt if jieba.pool is None: for w in dt.cut(sentence, HMM=HMM): yield w else: parts = strdecode(sentence).splitlines(True) if HMM: result = jieba.pool.map(_lcut_internal, parts) else: result = jieba.pool.map(_lcut_internal_no_hmm, parts) for r in result: for w in r: yield w def lcut(sentence, HMM=True): return list(cut(sentence, HMM))
mit
tomlof/scikit-learn
examples/plot_digits_pipe.py
65
1652
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================

The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.

We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)


# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV

# Chain the two stages: PCA for dimensionality reduction, then a
# logistic-regression classifier on the reduced features.
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])

# Handwritten-digit data set (8x8 images flattened to 64 features).
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target

# Fit a standalone PCA so its explained-variance spectrum can be plotted.
pca.fit(X_digits)

plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')

# Candidate hyper-parameters for the grid search.
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)

# Pipeline parameters are addressed as '<step>__<param>'.
estimator = GridSearchCV(pipe,
                         {'pca__n_components': n_components,
                          'logistic__C': Cs})
estimator.fit(X_digits, y_digits)

# Mark the PCA dimensionality picked by the grid search on the spectrum.
best_pca = estimator.best_estimator_.named_steps['pca']
plt.axvline(best_pca.n_components,
            linestyle=':', label='n_components chosen')
plt.legend(prop={'size': 12})
plt.show()
bsd-3-clause
was4444/chromium.src
tools/perf/measurements/v8_gc_times.py
5
9627
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from telemetry.page import page_test
from telemetry.timeline.model import TimelineModel
from telemetry.timeline import tracing_config
from telemetry.util import statistics
from telemetry.value import scalar


class V8GCTimes(page_test.PageTest):
  """Page test that traces a page load and reports V8 GC timing metrics.

  Tracing is started before navigation, stopped in
  ValidateAndMeasurePage, and the renderer main thread's V8 GC slices are
  aggregated into per-event-type scalar values.
  """

  # Timeout (seconds) passed to StartTracing.
  _TIME_OUT_IN_SECONDS = 60
  # Trace categories required to see the V8 GC and idle-task events.
  _CATEGORIES = ['blink.console',
                 'renderer.scheduler',
                 'v8',
                 'webkit.console']
  _RENDERER_MAIN_THREAD = 'CrRendererMain'
  # Slice name that marks an idle task; GC events nested under it ran
  # during idle time.
  _IDLE_TASK_PARENT = 'SingleThreadIdleTaskRunner::RunTask'

  def __init__(self):
    super(V8GCTimes, self).__init__()

  def WillNavigateToPage(self, page, tab):
    """Starts tracing with the categories this measurement needs."""
    config = tracing_config.TracingConfig()
    for category in self._CATEGORIES:
      config.tracing_category_filter.AddIncludedCategory(category)
    config.enable_chrome_trace = True
    tab.browser.platform.tracing_controller.StartTracing(
        config, self._TIME_OUT_IN_SECONDS)

  def ValidateAndMeasurePage(self, page, tab, results):
    """Stops tracing and converts the tab's renderer trace into results."""
    trace_data = tab.browser.platform.tracing_controller.StopTracing()
    timeline_model = TimelineModel(trace_data)
    renderer_process = timeline_model.GetRendererProcessFromTabId(tab.id)
    self._AddV8MetricsToResults(renderer_process, results)

  def DidRunPage(self, platform):
    # Clean up tracing if the run aborted before StopTracing happened.
    if platform.tracing_controller.is_tracing_running:
      platform.tracing_controller.StopTracing()

  def _AddV8MetricsToResults(self, process, results):
    """Adds GC-event and CPU-time metrics for the renderer main thread."""
    if process is None:
      return

    for thread in process.threads.values():
      if thread.name != self._RENDERER_MAIN_THREAD:
        continue
      self._AddV8EventStatsToResults(thread, results)
      self._AddCpuTimeStatsToResults(thread, results)

  def _AddV8EventStatsToResults(self, thread, results):
    """Aggregates the thread's V8 GC slices and emits scalar values.

    For each GC event type this reports: total/max/average thread
    duration, event count, duration outside idle tasks, idle-deadline
    overrun and the percentage spent inside idle tasks — plus the
    corresponding v8_gc_total_* aggregates.
    """
    v8_event_stats = [
        V8EventStat('V8.GCIncrementalMarking',
                    'v8_gc_incremental_marking',
                    'incremental marking steps'),
        V8EventStat('V8.GCScavenger',
                    'v8_gc_scavenger',
                    'scavenges'),
        V8EventStat('V8.GCCompactor',
                    'v8_gc_mark_compactor',
                    'mark-sweep-compactor'),
        V8EventStat('V8.GCFinalizeMC',
                    'v8_gc_finalize_incremental',
                    'finalization of incremental marking'),
        V8EventStat('V8.GCFinalizeMCReduceMemory',
                    'v8_gc_finalize_incremental_reduce_memory',
                    'finalization of incremental marking with memory reducer')]

    # Find all V8 GC events in the trace.
    for event in thread.IterAllSlices():
      event_stat = _FindV8EventStatForEvent(v8_event_stats, event.name)

      if not event_stat:
        continue

      event_stat.thread_duration += event.thread_duration
      event_stat.max_thread_duration = max(event_stat.max_thread_duration,
                                           event.thread_duration)
      event_stat.count += 1

      parent_idle_task = _ParentIdleTask(event)
      if parent_idle_task:
        allotted_idle_time = parent_idle_task.args['allotted_time_ms']
        idle_task_wall_overrun = 0
        if event.duration > allotted_idle_time:
          idle_task_wall_overrun = event.duration - allotted_idle_time
        # Don't count time over the deadline as being inside idle time.
        # Since the deadline should be relative to wall clock we compare
        # allotted_time_ms with wall duration instead of thread duration, and
        # then assume the thread duration was inside idle for the same
        # percentage of time.
        inside_idle = event.thread_duration * statistics.DivideIfPossibleOrZero(
            event.duration - idle_task_wall_overrun, event.duration)
        event_stat.thread_duration_inside_idle += inside_idle
        event_stat.idle_task_overrun_duration += idle_task_wall_overrun

    for v8_event_stat in v8_event_stats:
      results.AddValue(scalar.ScalarValue(
          results.current_page, v8_event_stat.result_name, 'ms',
          v8_event_stat.thread_duration,
          description=('Total thread duration spent in %s' %
                       v8_event_stat.result_description)))
      results.AddValue(scalar.ScalarValue(
          results.current_page, '%s_max' % v8_event_stat.result_name, 'ms',
          v8_event_stat.max_thread_duration,
          description=('Max thread duration spent in %s' %
                       v8_event_stat.result_description)))
      results.AddValue(scalar.ScalarValue(
          results.current_page, '%s_count' % v8_event_stat.result_name,
          'count', v8_event_stat.count,
          description=('Number of %s' %
                       v8_event_stat.result_description)))
      average_thread_duration = statistics.DivideIfPossibleOrZero(
          v8_event_stat.thread_duration, v8_event_stat.count)
      results.AddValue(scalar.ScalarValue(
          results.current_page, '%s_average' % v8_event_stat.result_name, 'ms',
          average_thread_duration,
          description=('Average thread duration spent in %s' %
                       v8_event_stat.result_description)))
      results.AddValue(scalar.ScalarValue(
          results.current_page,
          '%s_outside_idle' % v8_event_stat.result_name, 'ms',
          v8_event_stat.thread_duration_outside_idle,
          description=(
              'Total thread duration spent in %s outside of idle tasks' %
              v8_event_stat.result_description)))
      results.AddValue(scalar.ScalarValue(
          results.current_page,
          '%s_idle_deadline_overrun' % v8_event_stat.result_name, 'ms',
          v8_event_stat.idle_task_overrun_duration,
          description=(
              'Total idle task deadline overrun for %s idle tasks' %
              v8_event_stat.result_description)))
      results.AddValue(scalar.ScalarValue(
          results.current_page,
          '%s_percentage_idle' % v8_event_stat.result_name, 'idle%',
          v8_event_stat.percentage_thread_duration_during_idle,
          description=(
              'Percentage of %s spent in idle time' %
              v8_event_stat.result_description)))

    # Add total metrics.
    gc_total = sum(x.thread_duration for x in v8_event_stats)
    gc_total_outside_idle = sum(
        x.thread_duration_outside_idle for x in v8_event_stats)
    gc_total_idle_deadline_overrun = sum(
        x.idle_task_overrun_duration for x in v8_event_stats)
    gc_total_percentage_idle = statistics.DivideIfPossibleOrZero(
        100 * (gc_total - gc_total_outside_idle), gc_total)

    results.AddValue(scalar.ScalarValue(
        results.current_page, 'v8_gc_total', 'ms', gc_total,
        description=('Total thread duration of all garbage '
                     'collection events')))
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'v8_gc_total_outside_idle', 'ms',
        gc_total_outside_idle,
        description=(
            'Total thread duration of all garbage collection events '
            'outside of idle tasks')))
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'v8_gc_total_idle_deadline_overrun', 'ms',
        gc_total_idle_deadline_overrun,
        description=(
            'Total idle task deadline overrun for all idle tasks garbage '
            'collection events')))
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'v8_gc_total_percentage_idle', 'idle%',
        gc_total_percentage_idle,
        description=(
            'Percentage of the thread duration of all garbage collection '
            'events spent inside of idle tasks')))

  def _AddCpuTimeStatsToResults(self, thread, results):
    """Reports wall-clock span and summed CPU time of top-level slices."""
    if thread.toplevel_slices:
      start_time = min(s.start for s in thread.toplevel_slices)
      end_time = max(s.end for s in thread.toplevel_slices)
      duration = end_time - start_time
      cpu_time = sum(s.thread_duration for s in thread.toplevel_slices)
    else:
      duration = cpu_time = 0

    results.AddValue(scalar.ScalarValue(
        results.current_page, 'duration', 'ms', duration))
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'cpu_time', 'ms', cpu_time))


def _FindV8EventStatForEvent(v8_event_stats_list, event_name):
  # Linear scan is fine: the list has five entries.
  for v8_event_stat in v8_event_stats_list:
    if v8_event_stat.src_event_name == event_name:
      return v8_event_stat
  return None


def _ParentIdleTask(event):
  """Returns the enclosing idle-task slice of *event*, or None."""
  parent = event.parent_slice
  while parent:
    # pylint: disable=protected-access
    if parent.name == V8GCTimes._IDLE_TASK_PARENT:
      return parent
    parent = parent.parent_slice
  return None


class V8EventStat(object):
  """Accumulator for one V8 GC event type (durations in milliseconds)."""

  def __init__(self, src_event_name, result_name, result_description):
    self.src_event_name = src_event_name        # trace slice name to match
    self.result_name = result_name              # metric name prefix
    self.result_description = result_description
    self.thread_duration = 0.0                  # summed thread time
    self.thread_duration_inside_idle = 0.0      # portion inside idle tasks
    self.idle_task_overrun_duration = 0.0       # wall time past deadlines
    self.max_thread_duration = 0.0
    self.count = 0

  @property
  def thread_duration_outside_idle(self):
    return self.thread_duration - self.thread_duration_inside_idle

  @property
  def percentage_thread_duration_during_idle(self):
    # Returns 0 when no thread time was recorded at all.
    return statistics.DivideIfPossibleOrZero(
        100 * self.thread_duration_inside_idle, self.thread_duration)
bsd-3-clause
iABC2XYZ/abc
DM_RFGAP_5/From ~4/RFGap.py
3
1368
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 31 22:10:38 2017

Author: Peiyong Jiang : jiangpeiyong@impcas.ac.cn
Function: RFGap
__________________________________________________
K,dE=RFGap()

TensorFlow graph builders for a thin RF-gap model: RFGap() produces the
transverse defocusing factor K and the energy gain dE, LengthCellM() the
per-cell lengths implied by a sequence of gap voltages and phases.
"""
from InputBeam import qParticle,massMeV,mParticle,energyInMeV
from Constants import pi
from BetaGammaC import Energy2BetaGammaC
from InputLattice import freqMHz
from Lambda import LambdaM
import tensorflow as tf
from Lambda import BetaLambdaM


def RFGap(ETLMV, phiPi, energyMeV):
    """Build graph nodes (K, dE) for one RF gap.

    ETLMV: effective gap voltage (E*T*L, MV); phiPi: synchronous phase
    (rad); energyMeV: kinetic energy at the gap.
    """
    # charge-to-mass scaled gap voltage, used by both outputs
    qm_etl = qParticle / mParticle * ETLMV

    # relativistic factor (beta*gamma)^3 and RF wavelength form the
    # denominator of the defocusing term
    beta_gamma = Energy2BetaGammaC(energyMeV)
    beta_gamma_cubed = tf.pow(beta_gamma, 3)
    rf_lambda = LambdaM(freqMHz)
    denom = massMeV * beta_gamma_cubed * rf_lambda

    K = tf.multiply(tf.div(pi * qm_etl, denom), tf.sin(phiPi))
    dE = tf.multiply(qm_etl, tf.cos(phiPi))
    return K, dE


def LengthCellM(wETLMV, wPhis):
    """Build a graph node with the cell lengths (m) for gap vectors
    wETLMV (voltages) and wPhis (phases, rad)."""
    # cumulative kinetic energy after each gap, prefixed with the
    # injection energy
    gains = qParticle / mParticle * wETLMV * tf.cos(wPhis)
    kinetic = energyInMeV + tf.cumsum(gains)
    energies = tf.concat([energyInMeV, kinetic], 0)

    beta_lambda = BetaLambdaM(energies, freqMHz)

    # phase advance per cell: pi plus the phase step between neighbouring
    # gaps; the first/last cells use their single bounding phase
    mid = wPhis[1::] - wPhis[0:-1:] + pi
    first = tf.expand_dims(wPhis[0] + pi, 0)
    last = tf.expand_dims(wPhis[-1] + pi, 0)
    spans = tf.concat([first, mid, last], 0)

    return beta_lambda * spans / (2. * pi)
gpl-3.0
jdogewow/gamercoin
contrib/pyminer/pyminer.py
1257
6438
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file license.txt or http://www.opensource.org/licenses/mit-license.php.
#

# Minimal CPU miner: pulls work from a bitcoin-style JSON-RPC server via
# 'getwork', scans nonces for a double-sha256 hash below the target and
# submits solutions upstream.  Python 2 only (httplib, print statements,
# long literals).

import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process

# Seconds to sleep after a failed/invalid RPC exchange before retrying.
ERR_SLEEP = 15
# Initial nonces scanned per getwork; rescaled from the measured hash rate
# in Miner.iterate().
MAX_NONCE = 1000000L

settings = {}           # key=value pairs parsed from the config file
pp = pprint.PrettyPrinter(indent=4)

class BitcoinRPC:
	'''
	Tiny JSON-RPC 1.1 client over one persistent basic-auth HTTP
	connection; wraps only the calls the miner needs.
	'''
	OBJID = 1  # running request id
	# NOTE(review): 'self.OBJID += 1' below creates an instance attribute
	# shadowing this class attribute, so ids count per instance — confirm
	# that is intended.

	def __init__(self, host, port, username, password):
		authpair = "%s:%s" % (username, password)
		self.authhdr = "Basic %s" % (base64.b64encode(authpair))
		self.conn = httplib.HTTPConnection(host, port, False, 30)

	def rpc(self, method, params=None):
		'''
		Issue one request.  Returns the decoded 'result' member, the
		'error' object on an RPC-level error, or None on transport or
		decode failure.
		'''
		self.OBJID += 1
		obj = { 'version' : '1.1',
			'method' : method,
			'id' : self.OBJID }
		if params is None:
			obj['params'] = []
		else:
			obj['params'] = params
		self.conn.request('POST', '/', json.dumps(obj),
			{ 'Authorization' : self.authhdr,
			  'Content-type' : 'application/json' })

		resp = self.conn.getresponse()
		if resp is None:
			print "JSON-RPC: no response"
			return None

		body = resp.read()
		resp_obj = json.loads(body)
		if resp_obj is None:
			print "JSON-RPC: cannot JSON-decode body"
			return None
		if 'error' in resp_obj and resp_obj['error'] != None:
			return resp_obj['error']
		if 'result' not in resp_obj:
			print "JSON-RPC: no result in object"
			return None

		return resp_obj['result']

	def getblockcount(self):
		return self.rpc('getblockcount')

	def getwork(self, data=None):
		# With no data: fetch new work; with data: submit a solution.
		return self.rpc('getwork', data)

def uint32(x):
	# Mask to an unsigned 32-bit value.
	return x & 0xffffffffL

def bytereverse(x):
	# Byte-swap one 32-bit word (endianness flip).
	return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
			(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))

def bufreverse(in_buf):
	# Byte-swap every 32-bit word of a buffer (length assumed to be a
	# multiple of 4).
	out_words = []
	for i in range(0, len(in_buf), 4):
		word = struct.unpack('@I', in_buf[i:i+4])[0]
		out_words.append(struct.pack('@I', bytereverse(word)))
	return ''.join(out_words)

def wordreverse(in_buf):
	# Reverse the order of the 32-bit words in a buffer.
	out_words = []
	for i in range(0, len(in_buf), 4):
		out_words.append(in_buf[i:i+4])
	out_words.reverse()
	return ''.join(out_words)

class Miner:
	'''
	One mining worker: repeatedly fetches work, scans a nonce window and
	submits any proof-of-work found.
	'''
	def __init__(self, id):
		self.id = id
		self.max_nonce = MAX_NONCE  # nonce window size, tuned per scantime

	def work(self, datastr, targetstr):
		'''
		Scan nonces 0..max_nonce-1 against the getwork header.
		Returns (hashes_done, nonce_bin) where nonce_bin is the 4-byte
		winning nonce or None if the window was exhausted.
		'''
		# decode work data hex string to binary
		static_data = datastr.decode('hex')
		static_data = bufreverse(static_data)

		# the first 76b of 80b do not change
		blk_hdr = static_data[:76]

		# decode 256-bit target value
		targetbin = targetstr.decode('hex')
		targetbin = targetbin[::-1]	# byte-swap and dword-swap
		targetbin_str = targetbin.encode('hex')
		target = long(targetbin_str, 16)

		# pre-hash first 76b of block header: every nonce reuses this
		# partial sha256 state instead of rehashing the whole header.
		static_hash = hashlib.sha256()
		static_hash.update(blk_hdr)

		for nonce in xrange(self.max_nonce):

			# encode 32-bit nonce value
			nonce_bin = struct.pack("<I", nonce)

			# hash final 4b, the nonce value
			hash1_o = static_hash.copy()
			hash1_o.update(nonce_bin)
			hash1 = hash1_o.digest()

			# sha256 hash of sha256 hash
			hash_o = hashlib.sha256()
			hash_o.update(hash1)
			hash = hash_o.digest()

			# quick test for winning solution: high 32 bits zero?
			if hash[-4:] != '\0\0\0\0':
				continue

			# convert binary hash to 256-bit Python long
			hash = bufreverse(hash)
			hash = wordreverse(hash)

			hash_str = hash.encode('hex')
			l = long(hash_str, 16)

			# proof-of-work test:  hash < target
			if l < target:
				print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
				return (nonce + 1, nonce_bin)
			else:
				print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
#				return (nonce + 1, nonce_bin)

		return (nonce + 1, None)

	def submit_work(self, rpc, original_data, nonce_bin):
		'''Splice the winning nonce into the getwork data and resubmit.'''
		nonce_bin = bufreverse(nonce_bin)
		nonce = nonce_bin.encode('hex')
		# hex offsets 152:160 are the nonce field of the 80-byte header
		solution = original_data[:152] + nonce + original_data[160:256]
		param_arr = [ solution ]
		result = rpc.getwork(param_arr)
		print time.asctime(), "--> Upstream RPC result:", result

	def iterate(self, rpc):
		'''One fetch/scan/submit cycle; also retunes the nonce window so
		a scan takes about settings['scantime'] seconds.'''
		work = rpc.getwork()
		if work is None:
			time.sleep(ERR_SLEEP)
			return
		if 'data' not in work or 'target' not in work:
			time.sleep(ERR_SLEEP)
			return

		time_start = time.time()

		(hashes_done, nonce_bin) = self.work(work['data'],
						     work['target'])

		time_end = time.time()
		time_diff = time_end - time_start

		self.max_nonce = long(
			(hashes_done * settings['scantime']) / time_diff)
		if self.max_nonce > 0xfffffffaL:
			self.max_nonce = 0xfffffffaL

		if settings['hashmeter']:
			print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
			      self.id, hashes_done,
			      (hashes_done / 1000.0) / time_diff)

		if nonce_bin is not None:
			self.submit_work(rpc, work['data'], nonce_bin)

	def loop(self):
		'''Mine forever against the configured RPC server.'''
		rpc = BitcoinRPC(settings['host'], settings['port'],
				 settings['rpcuser'], settings['rpcpass'])
		if rpc is None:
			return

		while True:
			self.iterate(rpc)

def miner_thread(id):
	# Process entry point: one Miner per worker process.
	miner = Miner(id)
	miner.loop()

if __name__ == '__main__':
	if len(sys.argv) != 2:
		print "Usage: pyminer.py CONFIG-FILE"
		sys.exit(1)

	# Parse simple "key=value" config lines; '#' starts a comment line.
	f = open(sys.argv[1])
	for line in f:
		# skip comment lines
		m = re.search('^\s*#', line)
		if m:
			continue

		# parse key=value lines
		m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
		if m is None:
			continue
		settings[m.group(1)] = m.group(2)
	f.close()

	# Apply defaults for everything except the required credentials.
	if 'host' not in settings:
		settings['host'] = '127.0.0.1'
	if 'port' not in settings:
		settings['port'] = 8332
	if 'threads' not in settings:
		settings['threads'] = 1
	if 'hashmeter' not in settings:
		settings['hashmeter'] = 0
	if 'scantime' not in settings:
		settings['scantime'] = 30L
	if 'rpcuser' not in settings or 'rpcpass' not in settings:
		print "Missing username and/or password in cfg file"
		sys.exit(1)

	settings['port'] = int(settings['port'])
	settings['threads'] = int(settings['threads'])
	settings['hashmeter'] = int(settings['hashmeter'])
	settings['scantime'] = long(settings['scantime'])

	# Spawn the worker processes (despite the name, these are processes,
	# not threads) and wait for them.
	thr_list = []
	for thr_id in range(settings['threads']):
		p = Process(target=miner_thread, args=(thr_id,))
		p.start()
		thr_list.append(p)
		time.sleep(1)			# stagger threads

	print settings['threads'], "mining threads started"

	print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
	try:
		for thr_proc in thr_list:
			thr_proc.join()
	except KeyboardInterrupt:
		pass
	print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
mit
Iv4n3r0h1n/2chcoin_wallet
contrib/pyminer/pyminer.py
385
6434
#!/usr/bin/python # # Copyright (c) 2011 The Bitcoin developers # Distributed under the MIT/X11 software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # import time import json import pprint import hashlib import struct import re import base64 import httplib import sys from multiprocessing import Process ERR_SLEEP = 15 MAX_NONCE = 1000000L settings = {} pp = pprint.PrettyPrinter(indent=4) class BitcoinRPC: OBJID = 1 def __init__(self, host, port, username, password): authpair = "%s:%s" % (username, password) self.authhdr = "Basic %s" % (base64.b64encode(authpair)) self.conn = httplib.HTTPConnection(host, port, False, 30) def rpc(self, method, params=None): self.OBJID += 1 obj = { 'version' : '1.1', 'method' : method, 'id' : self.OBJID } if params is None: obj['params'] = [] else: obj['params'] = params self.conn.request('POST', '/', json.dumps(obj), { 'Authorization' : self.authhdr, 'Content-type' : 'application/json' }) resp = self.conn.getresponse() if resp is None: print "JSON-RPC: no response" return None body = resp.read() resp_obj = json.loads(body) if resp_obj is None: print "JSON-RPC: cannot JSON-decode body" return None if 'error' in resp_obj and resp_obj['error'] != None: return resp_obj['error'] if 'result' not in resp_obj: print "JSON-RPC: no result in object" return None return resp_obj['result'] def getblockcount(self): return self.rpc('getblockcount') def getwork(self, data=None): return self.rpc('getwork', data) def uint32(x): return x & 0xffffffffL def bytereverse(x): return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) | (((x) >> 8) & 0x0000ff00) | ((x) >> 24) )) def bufreverse(in_buf): out_words = [] for i in range(0, len(in_buf), 4): word = struct.unpack('@I', in_buf[i:i+4])[0] out_words.append(struct.pack('@I', bytereverse(word))) return ''.join(out_words) def wordreverse(in_buf): out_words = [] for i in range(0, len(in_buf), 4): out_words.append(in_buf[i:i+4]) out_words.reverse() return 
''.join(out_words) class Miner: def __init__(self, id): self.id = id self.max_nonce = MAX_NONCE def work(self, datastr, targetstr): # decode work data hex string to binary static_data = datastr.decode('hex') static_data = bufreverse(static_data) # the first 76b of 80b do not change blk_hdr = static_data[:76] # decode 256-bit target value targetbin = targetstr.decode('hex') targetbin = targetbin[::-1] # byte-swap and dword-swap targetbin_str = targetbin.encode('hex') target = long(targetbin_str, 16) # pre-hash first 76b of block header static_hash = hashlib.sha256() static_hash.update(blk_hdr) for nonce in xrange(self.max_nonce): # encode 32-bit nonce value nonce_bin = struct.pack("<I", nonce) # hash final 4b, the nonce value hash1_o = static_hash.copy() hash1_o.update(nonce_bin) hash1 = hash1_o.digest() # sha256 hash of sha256 hash hash_o = hashlib.sha256() hash_o.update(hash1) hash = hash_o.digest() # quick test for winning solution: high 32 bits zero? if hash[-4:] != '\0\0\0\0': continue # convert binary hash to 256-bit Python long hash = bufreverse(hash) hash = wordreverse(hash) hash_str = hash.encode('hex') l = long(hash_str, 16) # proof-of-work test: hash < target if l < target: print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,) return (nonce + 1, nonce_bin) else: print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,) # return (nonce + 1, nonce_bin) return (nonce + 1, None) def submit_work(self, rpc, original_data, nonce_bin): nonce_bin = bufreverse(nonce_bin) nonce = nonce_bin.encode('hex') solution = original_data[:152] + nonce + original_data[160:256] param_arr = [ solution ] result = rpc.getwork(param_arr) print time.asctime(), "--> Upstream RPC result:", result def iterate(self, rpc): work = rpc.getwork() if work is None: time.sleep(ERR_SLEEP) return if 'data' not in work or 'target' not in work: time.sleep(ERR_SLEEP) return time_start = time.time() (hashes_done, nonce_bin) = self.work(work['data'], work['target']) time_end = time.time() 
time_diff = time_end - time_start self.max_nonce = long( (hashes_done * settings['scantime']) / time_diff) if self.max_nonce > 0xfffffffaL: self.max_nonce = 0xfffffffaL if settings['hashmeter']: print "HashMeter(%d): %d hashes, %.2f Khash/sec" % ( self.id, hashes_done, (hashes_done / 1000.0) / time_diff) if nonce_bin is not None: self.submit_work(rpc, work['data'], nonce_bin) def loop(self): rpc = BitcoinRPC(settings['host'], settings['port'], settings['rpcuser'], settings['rpcpass']) if rpc is None: return while True: self.iterate(rpc) def miner_thread(id): miner = Miner(id) miner.loop() if __name__ == '__main__': if len(sys.argv) != 2: print "Usage: pyminer.py CONFIG-FILE" sys.exit(1) f = open(sys.argv[1]) for line in f: # skip comment lines m = re.search('^\s*#', line) if m: continue # parse key=value lines m = re.search('^(\w+)\s*=\s*(\S.*)$', line) if m is None: continue settings[m.group(1)] = m.group(2) f.close() if 'host' not in settings: settings['host'] = '127.0.0.1' if 'port' not in settings: settings['port'] = 8332 if 'threads' not in settings: settings['threads'] = 1 if 'hashmeter' not in settings: settings['hashmeter'] = 0 if 'scantime' not in settings: settings['scantime'] = 30L if 'rpcuser' not in settings or 'rpcpass' not in settings: print "Missing username and/or password in cfg file" sys.exit(1) settings['port'] = int(settings['port']) settings['threads'] = int(settings['threads']) settings['hashmeter'] = int(settings['hashmeter']) settings['scantime'] = long(settings['scantime']) thr_list = [] for thr_id in range(settings['threads']): p = Process(target=miner_thread, args=(thr_id,)) p.start() thr_list.append(p) time.sleep(1) # stagger threads print settings['threads'], "mining threads started" print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port']) try: for thr_proc in thr_list: thr_proc.join() except KeyboardInterrupt: pass print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
mit
Filechaser/nzbToMedia
libs/unidecode/x0c2.py
253
4710
data = ( 'syon', # 0x00 'syonj', # 0x01 'syonh', # 0x02 'syod', # 0x03 'syol', # 0x04 'syolg', # 0x05 'syolm', # 0x06 'syolb', # 0x07 'syols', # 0x08 'syolt', # 0x09 'syolp', # 0x0a 'syolh', # 0x0b 'syom', # 0x0c 'syob', # 0x0d 'syobs', # 0x0e 'syos', # 0x0f 'syoss', # 0x10 'syong', # 0x11 'syoj', # 0x12 'syoc', # 0x13 'syok', # 0x14 'syot', # 0x15 'syop', # 0x16 'syoh', # 0x17 'su', # 0x18 'sug', # 0x19 'sugg', # 0x1a 'sugs', # 0x1b 'sun', # 0x1c 'sunj', # 0x1d 'sunh', # 0x1e 'sud', # 0x1f 'sul', # 0x20 'sulg', # 0x21 'sulm', # 0x22 'sulb', # 0x23 'suls', # 0x24 'sult', # 0x25 'sulp', # 0x26 'sulh', # 0x27 'sum', # 0x28 'sub', # 0x29 'subs', # 0x2a 'sus', # 0x2b 'suss', # 0x2c 'sung', # 0x2d 'suj', # 0x2e 'suc', # 0x2f 'suk', # 0x30 'sut', # 0x31 'sup', # 0x32 'suh', # 0x33 'sweo', # 0x34 'sweog', # 0x35 'sweogg', # 0x36 'sweogs', # 0x37 'sweon', # 0x38 'sweonj', # 0x39 'sweonh', # 0x3a 'sweod', # 0x3b 'sweol', # 0x3c 'sweolg', # 0x3d 'sweolm', # 0x3e 'sweolb', # 0x3f 'sweols', # 0x40 'sweolt', # 0x41 'sweolp', # 0x42 'sweolh', # 0x43 'sweom', # 0x44 'sweob', # 0x45 'sweobs', # 0x46 'sweos', # 0x47 'sweoss', # 0x48 'sweong', # 0x49 'sweoj', # 0x4a 'sweoc', # 0x4b 'sweok', # 0x4c 'sweot', # 0x4d 'sweop', # 0x4e 'sweoh', # 0x4f 'swe', # 0x50 'sweg', # 0x51 'swegg', # 0x52 'swegs', # 0x53 'swen', # 0x54 'swenj', # 0x55 'swenh', # 0x56 'swed', # 0x57 'swel', # 0x58 'swelg', # 0x59 'swelm', # 0x5a 'swelb', # 0x5b 'swels', # 0x5c 'swelt', # 0x5d 'swelp', # 0x5e 'swelh', # 0x5f 'swem', # 0x60 'sweb', # 0x61 'swebs', # 0x62 'swes', # 0x63 'swess', # 0x64 'sweng', # 0x65 'swej', # 0x66 'swec', # 0x67 'swek', # 0x68 'swet', # 0x69 'swep', # 0x6a 'sweh', # 0x6b 'swi', # 0x6c 'swig', # 0x6d 'swigg', # 0x6e 'swigs', # 0x6f 'swin', # 0x70 'swinj', # 0x71 'swinh', # 0x72 'swid', # 0x73 'swil', # 0x74 'swilg', # 0x75 'swilm', # 0x76 'swilb', # 0x77 'swils', # 0x78 'swilt', # 0x79 'swilp', # 0x7a 'swilh', # 0x7b 'swim', # 0x7c 'swib', # 0x7d 'swibs', # 0x7e 'swis', # 0x7f 'swiss', 
# 0x80 'swing', # 0x81 'swij', # 0x82 'swic', # 0x83 'swik', # 0x84 'swit', # 0x85 'swip', # 0x86 'swih', # 0x87 'syu', # 0x88 'syug', # 0x89 'syugg', # 0x8a 'syugs', # 0x8b 'syun', # 0x8c 'syunj', # 0x8d 'syunh', # 0x8e 'syud', # 0x8f 'syul', # 0x90 'syulg', # 0x91 'syulm', # 0x92 'syulb', # 0x93 'syuls', # 0x94 'syult', # 0x95 'syulp', # 0x96 'syulh', # 0x97 'syum', # 0x98 'syub', # 0x99 'syubs', # 0x9a 'syus', # 0x9b 'syuss', # 0x9c 'syung', # 0x9d 'syuj', # 0x9e 'syuc', # 0x9f 'syuk', # 0xa0 'syut', # 0xa1 'syup', # 0xa2 'syuh', # 0xa3 'seu', # 0xa4 'seug', # 0xa5 'seugg', # 0xa6 'seugs', # 0xa7 'seun', # 0xa8 'seunj', # 0xa9 'seunh', # 0xaa 'seud', # 0xab 'seul', # 0xac 'seulg', # 0xad 'seulm', # 0xae 'seulb', # 0xaf 'seuls', # 0xb0 'seult', # 0xb1 'seulp', # 0xb2 'seulh', # 0xb3 'seum', # 0xb4 'seub', # 0xb5 'seubs', # 0xb6 'seus', # 0xb7 'seuss', # 0xb8 'seung', # 0xb9 'seuj', # 0xba 'seuc', # 0xbb 'seuk', # 0xbc 'seut', # 0xbd 'seup', # 0xbe 'seuh', # 0xbf 'syi', # 0xc0 'syig', # 0xc1 'syigg', # 0xc2 'syigs', # 0xc3 'syin', # 0xc4 'syinj', # 0xc5 'syinh', # 0xc6 'syid', # 0xc7 'syil', # 0xc8 'syilg', # 0xc9 'syilm', # 0xca 'syilb', # 0xcb 'syils', # 0xcc 'syilt', # 0xcd 'syilp', # 0xce 'syilh', # 0xcf 'syim', # 0xd0 'syib', # 0xd1 'syibs', # 0xd2 'syis', # 0xd3 'syiss', # 0xd4 'sying', # 0xd5 'syij', # 0xd6 'syic', # 0xd7 'syik', # 0xd8 'syit', # 0xd9 'syip', # 0xda 'syih', # 0xdb 'si', # 0xdc 'sig', # 0xdd 'sigg', # 0xde 'sigs', # 0xdf 'sin', # 0xe0 'sinj', # 0xe1 'sinh', # 0xe2 'sid', # 0xe3 'sil', # 0xe4 'silg', # 0xe5 'silm', # 0xe6 'silb', # 0xe7 'sils', # 0xe8 'silt', # 0xe9 'silp', # 0xea 'silh', # 0xeb 'sim', # 0xec 'sib', # 0xed 'sibs', # 0xee 'sis', # 0xef 'siss', # 0xf0 'sing', # 0xf1 'sij', # 0xf2 'sic', # 0xf3 'sik', # 0xf4 'sit', # 0xf5 'sip', # 0xf6 'sih', # 0xf7 'ssa', # 0xf8 'ssag', # 0xf9 'ssagg', # 0xfa 'ssags', # 0xfb 'ssan', # 0xfc 'ssanj', # 0xfd 'ssanh', # 0xfe 'ssad', # 0xff )
gpl-3.0
yoer/hue
desktop/core/ext-py/pyasn1-0.1.8/test/codec/cer/test_encoder.py
23
6061
from pyasn1.type import namedtype, univ, useful from pyasn1.codec.cer import encoder from pyasn1.compat.octets import ints2octs from pyasn1.error import PyAsn1Error from sys import version_info if version_info[0:2] < (2, 7) or \ version_info[0:2] in ( (3, 0), (3, 1) ): try: import unittest2 as unittest except ImportError: import unittest else: import unittest class BooleanEncoderTestCase(unittest.TestCase): def testTrue(self): assert encoder.encode(univ.Boolean(1)) == ints2octs((1, 1, 255)) def testFalse(self): assert encoder.encode(univ.Boolean(0)) == ints2octs((1, 1, 0)) class BitStringEncoderTestCase(unittest.TestCase): def testShortMode(self): assert encoder.encode( univ.BitString((1,0)*501) ) == ints2octs((3, 127, 6) + (170,) * 125 + (128,)) def testLongMode(self): assert encoder.encode( univ.BitString((1,0)*501) ) == ints2octs((3, 127, 6) + (170,) * 125 + (128,)) class OctetStringEncoderTestCase(unittest.TestCase): def testShortMode(self): assert encoder.encode( univ.OctetString('Quick brown fox') ) == ints2octs((4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120)) def testLongMode(self): assert encoder.encode( univ.OctetString('Q'*1001) ) == ints2octs((36, 128, 4, 130, 3, 232) + (81,)*1000 + (4, 1, 81, 0, 0)) class SetEncoderTestCase(unittest.TestCase): def setUp(self): self.s = univ.Set(componentType=namedtype.NamedTypes( namedtype.NamedType('place-holder', univ.Null('')), namedtype.OptionalNamedType('first-name', univ.OctetString('')), namedtype.DefaultedNamedType('age', univ.Integer(33)) )) def __init(self): self.s.clear() self.s.setComponentByPosition(0) def __initWithOptional(self): self.s.clear() self.s.setComponentByPosition(0) self.s.setComponentByPosition(1, 'quick brown') def __initWithDefaulted(self): self.s.clear() self.s.setComponentByPosition(0) self.s.setComponentByPosition(2, 1) def __initWithOptionalAndDefaulted(self): self.s.clear() self.s.setComponentByPosition(0, univ.Null('')) self.s.setComponentByPosition(1, 
univ.OctetString('quick brown')) self.s.setComponentByPosition(2, univ.Integer(1)) def testIndefMode(self): self.__init() assert encoder.encode(self.s) == ints2octs((49, 128, 5, 0, 0, 0)) def testWithOptionalIndefMode(self): self.__initWithOptional() assert encoder.encode( self.s ) == ints2octs((49, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 5, 0, 0, 0)) def testWithDefaultedIndefMode(self): self.__initWithDefaulted() assert encoder.encode( self.s ) == ints2octs((49, 128, 2, 1, 1, 5, 0, 0, 0)) def testWithOptionalAndDefaultedIndefMode(self): self.__initWithOptionalAndDefaulted() assert encoder.encode( self.s ) == ints2octs((49, 128, 2, 1, 1, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 5, 0, 0, 0)) class SetWithChoiceEncoderTestCase(unittest.TestCase): def setUp(self): c = univ.Choice(componentType=namedtype.NamedTypes( namedtype.NamedType('actual', univ.Boolean(0)) )) self.s = univ.Set(componentType=namedtype.NamedTypes( namedtype.NamedType('place-holder', univ.Null('')), namedtype.NamedType('status', c) )) def testIndefMode(self): self.s.setComponentByPosition(0) self.s.setComponentByName('status') self.s.getComponentByName('status').setComponentByPosition(0, 1) assert encoder.encode(self.s) == ints2octs((49, 128, 1, 1, 255, 5, 0, 0, 0)) class GeneralizedTimeEncoderTestCase(unittest.TestCase): # def testExtraZeroInSeconds(self): # try: # assert encoder.encode( # useful.GeneralizedTime('20150501120112.10Z') # ) # except PyAsn1Error: # pass # else: # assert 0, 'Meaningless trailing zero in fraction part tolerated' def testLocalTimezone(self): try: assert encoder.encode( useful.GeneralizedTime('20150501120112.1+0200') ) except PyAsn1Error: pass else: assert 0, 'Local timezone tolerated' def testMissingTimezone(self): try: assert encoder.encode( useful.GeneralizedTime('20150501120112.1') ) except PyAsn1Error: pass else: assert 0, 'Missing timezone tolerated' # When enabled, this breaks many existing encodings # # def 
testDecimalPoint(self): # try: # assert encoder.encode( # useful.GeneralizedTime('20150501120112Z') # ) # except PyAsn1Error: # pass # else: # assert 0, 'Missing decimal point tolerated' class UTCTimeEncoderTestCase(unittest.TestCase): def testFractionOfSecond(self): try: assert encoder.encode( useful.UTCTime('150501120112.10Z') ) except PyAsn1Error: pass else: assert 0, 'Decimal point tolerated' def testMissingTimezone(self): assert encoder.encode( useful.UTCTime('150501120112') ) == ints2octs((23, 13, 49, 53, 48, 53, 48, 49, 49, 50, 48, 49, 49, 50, 90)), 'Missing timezone not added' def testLocalTimezone(self): try: assert encoder.encode( useful.UTCTime('150501120112+0200') ) except PyAsn1Error: pass else: assert 0, 'Local timezone tolerated' if __name__ == '__main__': unittest.main()
apache-2.0
AlanD88/website
web2py/gluon/contrib/pyrtf/Renderer.py
44
26038
from types import StringType, ListType, TupleType from copy import deepcopy from Elements import * DEFAULT_TAB_WIDTH = 720 ParagraphAlignmentMap = { ParagraphPropertySet.LEFT : 'ql', ParagraphPropertySet.RIGHT : 'qr', ParagraphPropertySet.CENTER : 'qc', ParagraphPropertySet.JUSTIFY : 'qj', ParagraphPropertySet.DISTRIBUTE : 'qd' } TabAlignmentMap = { TabPropertySet.LEFT : '', TabPropertySet.RIGHT : 'tqr', TabPropertySet.CENTER : 'tqc', TabPropertySet.DECIMAL : 'tqdec' } TableAlignmentMap = { Table.LEFT : 'trql', Table.RIGHT : 'trqr', Table.CENTER : 'trqc' } CellAlignmentMap = { Cell.ALIGN_TOP : '', # clvertalt Cell.ALIGN_CENTER : 'clvertalc', Cell.ALIGN_BOTTOM : 'clvertalb' } CellFlowMap = { Cell.FLOW_LR_TB : '', # cltxlrtb, Text in a cell flows from left to right and top to bottom (default) Cell.FLOW_RL_TB : 'cltxtbrl', # Text in a cell flows right to left and top to bottom Cell.FLOW_LR_BT : 'cltxbtlr', # Text in a cell flows left to right and bottom to top Cell.FLOW_VERTICAL_LR_TB : 'cltxlrtbv', # Text in a cell flows left to right and top to bottom, vertical Cell.FLOW_VERTICAL_TB_RL : 'cltxtbrlv' } # Text in a cell flows top to bottom and right to left, vertical ShadingPatternMap = { ShadingPropertySet.HORIZONTAL : 'bghoriz', ShadingPropertySet.VERTICAL : 'bgvert', ShadingPropertySet.FORWARD_DIAGONAL : 'bgfdiag', ShadingPropertySet.BACKWARD_DIAGONAL : 'bgbdiag', ShadingPropertySet.VERTICAL_CROSS : 'bgcross', ShadingPropertySet.DIAGONAL_CROSS : 'bgdcross', ShadingPropertySet.DARK_HORIZONTAL : 'bgdkhoriz', ShadingPropertySet.DARK_VERTICAL : 'bgdkvert', ShadingPropertySet.DARK_FORWARD_DIAGONAL : 'bgdkfdiag', ShadingPropertySet.DARK_BACKWARD_DIAGONAL : 'bgdkbdiag', ShadingPropertySet.DARK_VERTICAL_CROSS : 'bgdkcross', ShadingPropertySet.DARK_DIAGONAL_CROSS : 'bgdkdcross' } TabLeaderMap = { TabPropertySet.DOTS : 'tldot', TabPropertySet.HYPHENS : 'tlhyph', TabPropertySet.UNDERLINE : 'tlul', TabPropertySet.THICK_LINE : 'tlth', TabPropertySet.EQUAL_SIGN : 'tleq' } 
BorderStyleMap = { BorderPropertySet.SINGLE : 'brdrs', BorderPropertySet.DOUBLE : 'brdrth', BorderPropertySet.SHADOWED : 'brdrsh', BorderPropertySet.DOUBLED : 'brdrdb', BorderPropertySet.DOTTED : 'brdrdot', BorderPropertySet.DASHED : 'brdrdash', BorderPropertySet.HAIRLINE : 'brdrhair' } SectionBreakTypeMap = { Section.NONE : 'sbknone', Section.COLUMN : 'sbkcol', Section.PAGE : 'sbkpage', Section.EVEN : 'sbkeven', Section.ODD : 'sbkodd' } class Settings( list ) : def __init__( self ) : super( Settings, self ).__init__() self._append = super( Settings, self ).append def append( self, value, mask=None, fallback=None ) : if (value is not 0) and value in [ False, None, '' ] : if fallback : self._append( self, fallback ) else : if mask : if value is True : value = mask else : value = mask % value self._append( value ) def Join( self ) : if self : return r'\%s' % '\\'.join( self ) return '' def __repr__( self ) : return self.Join() class Renderer : def __init__( self, write_custom_element_callback=None ) : self.character_style_map = {} self.paragraph_style_map = {} self.WriteCustomElement = write_custom_element_callback # # All of the Rend* Functions populate a Settings object with values # def _RendPageProperties( self, section, settings, in_section ) : # this one is different from the others as it takes the settings from a if in_section : #paper_size_code = 'psz%s' paper_width_code = 'pgwsxn%s' paper_height_code = 'pghsxn%s' landscape = 'lndscpsxn' margin_suffix = 'sxn' else : #paper_size_code = 'psz%s' paper_width_code = 'paperw%s' paper_height_code = 'paperh%s' landscape = 'landscape' margin_suffix = '' #settings.append( section.Paper.Code, paper_size_code ) settings.append( section.Paper.Width, paper_width_code ) settings.append( section.Paper.Height, paper_height_code ) if section.Landscape : settings.append( landscape ) if section.FirstPageNumber : settings.append( section.FirstPageNumber, 'pgnstarts%s' ) settings.append( 'pgnrestart' ) 
self._RendMarginsPropertySet( section.Margins, settings, margin_suffix ) def _RendShadingPropertySet( self, shading_props, settings, prefix='' ) : if not shading_props : return settings.append( shading_props.Shading, prefix + 'shading%s' ) settings.append( ShadingPatternMap.get( shading_props.Pattern, False ) ) settings.append( self._colour_map.get( shading_props.Foreground, False ), prefix + 'cfpat%s' ) settings.append( self._colour_map.get( shading_props.Background, False ), prefix + 'cbpat%s' ) def _RendBorderPropertySet( self, edge_props, settings ) : settings.append( BorderStyleMap[ edge_props.Style ] ) settings.append( edge_props.Width , 'brdrw%s' ) settings.append( self._colour_map.get( edge_props.Colour, False ), 'brdrcf%s' ) settings.append( edge_props.Spacing or False , 'brsp%s' ) def _RendFramePropertySet( self, frame_props, settings, tag_prefix='' ) : if not frame_props : return if frame_props.Top : settings.append( tag_prefix + 'brdrt' ) self._RendBorderPropertySet( frame_props.Top, settings ) if frame_props.Left : settings.append( tag_prefix + 'brdrl' ) self._RendBorderPropertySet( frame_props.Left, settings ) if frame_props.Bottom : settings.append( tag_prefix + 'brdrb' ) self._RendBorderPropertySet( frame_props.Bottom, settings ) if frame_props.Right : settings.append( tag_prefix + 'brdrr' ) self._RendBorderPropertySet( frame_props.Right, settings ) def _RendMarginsPropertySet( self, margin_props, settings, suffix='' ) : if not margin_props : return settings.append( margin_props.Top, 'margt' + suffix + '%s' ) settings.append( margin_props.Left, 'margl' + suffix + '%s' ) settings.append( margin_props.Bottom, 'margb' + suffix + '%s' ) settings.append( margin_props.Right, 'margr' + suffix + '%s' ) def _RendParagraphPropertySet( self, paragraph_props, settings ) : if not paragraph_props : return settings.append( ParagraphAlignmentMap[ paragraph_props.Alignment ] ) settings.append( paragraph_props.SpaceBefore, 'sb%s' ) settings.append( 
paragraph_props.SpaceAfter, 'sa%s' ) # then we have to find out all of the tabs width = 0 for tab in paragraph_props.Tabs : settings.append( TabAlignmentMap[ tab.Alignment ] ) settings.append( TabLeaderMap.get( tab.Leader, '' ) ) width += tab.Width or DEFAULT_TAB_WIDTH settings.append( 'tx%s' % width ) settings.append( paragraph_props.PageBreakBefore, 'pagebb' ) settings.append( paragraph_props.FirstLineIndent, 'fi%s' ) settings.append( paragraph_props.LeftIndent, 'li%s' ) settings.append( paragraph_props.RightIndent, 'ri%s' ) if paragraph_props.SpaceBetweenLines : if paragraph_props.SpaceBetweenLines < 0 : settings.append( paragraph_props.SpaceBetweenLines, r'sl%s\slmult0' ) else : settings.append( paragraph_props.SpaceBetweenLines, r'sl%s\slmult1' ) def _RendTextPropertySet( self, text_props, settings ) : if not text_props : return if text_props.Expansion : settings.append( text_props.Expansion, 'expndtw%s' ) settings.append( text_props.Bold, 'b' ) settings.append( text_props.Italic, 'i' ) settings.append( text_props.Underline, 'ul' ) settings.append( text_props.DottedUnderline, 'uld' ) settings.append( text_props.DoubleUnderline, 'uldb' ) settings.append( text_props.WordUnderline, 'ulw' ) settings.append( self._font_map.get( text_props.Font, False ), 'f%s' ) settings.append( text_props.Size, 'fs%s' ) settings.append( self._colour_map.get( text_props.Colour, False ), 'cf%s' ) if text_props.Frame : frame = text_props.Frame settings.append( 'chbrdr' ) settings.append( BorderStyleMap[ frame.Style ] ) settings.append( frame.Width , 'brdrw%s' ) settings.append( self._colour_map.get( frame.Colour, False ), 'brdrcf%s' ) # # All of the Write* functions will write to the internal file object # # the _ ones probably don't need to be used by anybody outside # but the other ones like WriteTextElement could be used in the Custom # callback. 
def Write( self, document, fout ) : # write all of the standard stuff based upon the first document self._doc = document self._fout = fout self._WriteDocument () self._WriteColours () self._WriteFonts () self._WriteStyleSheet() settings = Settings() self._RendPageProperties( self._doc.Sections[ 0 ], settings, in_section=False ) self._write( repr( settings ) ) # handle the simplest case first, we don't need to do anymore mucking around # with section headers, etc we can just rip the document out if len( document.Sections ) == 1 : self._WriteSection( document.Sections[ 0 ], is_first = True, add_header = False ) else : for section_idx, section in enumerate( document.Sections ) : is_first = section_idx == 0 add_header = True self._WriteSection( section, is_first, add_header ) self._write( '}' ) del self._fout, self._doc, self._CurrentStyle def _write( self, data, *params ) : #---------------------------------- # begin modification # by Herbert Weinhandl # to convert accented characters # to their rtf-compatible form #for c in range( 128, 256 ) : # data = data.replace( chr(c), "\'%x" % c) # end modification # # This isn't the right place for this as it is going to do # this loop for all sorts of writes, including settings, control codes, etc. # # I will create a def _WriteText (or something) method that is used when the # actual string that is to be viewed in the document is written, this can then # do the final accented character check. 
# # I left it here so that I remember to do the right thing when I have time #---------------------------------- if params : data = data % params self._fout.write( data ) def _WriteDocument( self ) : settings = Settings() assert Languages.IsValid ( self._doc.DefaultLanguage ) assert ViewKind.IsValid ( self._doc.ViewKind ) assert ViewZoomKind.IsValid( self._doc.ViewZoomKind ) assert ViewScale.IsValid ( self._doc.ViewScale ) settings.append( self._doc.DefaultLanguage, 'deflang%s' ) settings.append( self._doc.ViewKind , 'viewkind%s' ) settings.append( self._doc.ViewZoomKind , 'viewzk%s' ) settings.append( self._doc.ViewScale , 'viewscale%s' ) self._write( "{\\rtf1\\ansi\\ansicpg1252\\deff0%s\n" % settings ) def _WriteColours( self ) : self._write( r"{\colortbl ;" ) self._colour_map = {} offset = 0 for colour in self._doc.StyleSheet.Colours : self._write( r'\red%s\green%s\blue%s;', colour.Red, colour.Green, colour.Blue ) self._colour_map[ colour ] = offset + 1 offset += 1 self._write( "}\n" ) def _WriteFonts( self ) : self._write( r'{\fonttbl' ) self._font_map = {} offset = 0 for font in self._doc.StyleSheet.Fonts : pitch = '' panose = '' alternate = '' if font.Pitch : pitch = r'\fprq%s' % font.Pitch if font.Panose : panose = r'{\*\panose %s}' % font.Panose if font.Alternate : alternate = r'{\*\falt %s}' % font.Alternate.Name self._write( r'{\f%s\f%s%s\fcharset%s%s %s%s;}', offset, font.Family, pitch, font.CharacterSet, panose, font.Name, alternate ) self._font_map[ font ] = offset offset += 1 self._write( "}\n" ) def _WriteStyleSheet( self ) : self._write( r"{\stylesheet" ) # TO DO: character styles, does anybody actually use them? 
offset_map = {} for idx, style in enumerate( self._doc.StyleSheet.ParagraphStyles ) : offset_map[ style ] = idx # paragraph styles self.paragraph_style_map = {} for idx, style in enumerate( self._doc.StyleSheet.ParagraphStyles ) : if idx == 0 : default = style else : self._write( '\n' ) settings = Settings() # paragraph properties self._RendParagraphPropertySet( style.ParagraphPropertySet, settings ) self._RendFramePropertySet ( style.FramePropertySet, settings ) self._RendShadingPropertySet ( style.ShadingPropertySet, settings ) # text properties self._RendTextPropertySet ( style.TextStyle.TextPropertySet, settings ) self._RendShadingPropertySet( style.TextStyle.ShadingPropertySet, settings ) # have to take based_on = '\\sbasedon%s' % offset_map.get( style.BasedOn, 0 ) next = '\\snext%s' % offset_map.get( style.Next, 0 ) inln = '\\s%s%s' % ( idx, settings ) self._write( "{%s%s%s %s;}", inln, based_on, next, style.Name ) self.paragraph_style_map[ style ] = inln # if now style is specified for the first paragraph to be written, this one # will be used self._CurrentStyle = self.paragraph_style_map[ default ] self._write( "}\n" ) def _WriteSection( self, section, is_first, add_header ) : def WriteHF( hf, rtfword ) : #if not hf : return # if we don't have anything in the header/footer then include # a blank paragraph, this stops it from picking up the header/footer # from the previous section # if not hf : hf = [ Paragraph( '' ) ] if not hf : hf = [] self._write( '{\\%s' % rtfword ) self._WriteElements( hf ) self._write( '}\n' ) settings = Settings() if not is_first : # we need to finish off the preceding section # and reset all of our defaults back to standard settings.append( 'sect' ) # reset to our defaults settings.append( 'sectd' ) if add_header : settings.append( SectionBreakTypeMap[ section.BreakType ] ) self._RendPageProperties( section, settings, in_section=True ) settings.append( section.HeaderY, 'headery%s' ) settings.append( section.FooterY, 'footery%s' ) # 
write all of these out now as we need to do a write elements in the # next section self._write( repr( settings ) ) # finally after all that has settled down we can do the # headers and footers if section.FirstHeader or section.FirstFooter : # include the titlepg flag if the first page has a special format self._write( r'\titlepg' ) WriteHF( section.FirstHeader, 'headerf' ) WriteHF( section.FirstFooter, 'footerf' ) WriteHF( section.Header, 'header' ) WriteHF( section.Footer, 'footer' ) # and at last the contents of the section that actually appear on the page self._WriteElements( section ) def _WriteElements( self, elements ) : new_line = '' for element in elements : self._write( new_line ) new_line = '\n' clss = element.__class__ if clss == Paragraph : self.WriteParagraphElement( element ) elif clss == Table : self.WriteTableElement( element ) elif clss == StringType : self.WriteParagraphElement( Paragraph( element ) ) elif clss in [ RawCode, Image ] : self.WriteRawCode( element ) #elif clss == List : # self._HandleListElement( element ) elif self.WriteCustomElement : self.WriteCustomElement( self, element ) else : raise Exception( "Don't know how to handle elements of type %s" % clss ) def WriteParagraphElement( self, paragraph_elem, tag_prefix='', tag_suffix=r'\par', opening='{', closing='}' ) : # the tag_prefix and the tag_suffix take care of paragraphs in tables. A # paragraph in a table requires and extra tag at the front (intbl) and we # don't want the ending tag everytime. We want it for all paragraphs but # the last. 
overrides = Settings() self._RendParagraphPropertySet( paragraph_elem.Properties, overrides ) self._RendFramePropertySet ( paragraph_elem.Frame, overrides ) self._RendShadingPropertySet ( paragraph_elem.Shading, overrides ) # when writing the RTF the style is carried from the previous paragraph to the next, # so if the currently written paragraph has a style then make it the current one, # otherwise leave it as it was self._CurrentStyle = self.paragraph_style_map.get( paragraph_elem.Style, self._CurrentStyle ) self._write( r'%s\pard\plain%s %s%s ' % ( opening, tag_prefix, self._CurrentStyle, overrides ) ) for element in paragraph_elem : if isinstance( element, StringType ) : self._write( element ) elif isinstance( element, RawCode ) : self._write( element.Data ) elif isinstance( element, Text ) : self.WriteTextElement( element ) elif isinstance( element, Inline ) : self.WriteInlineElement( element ) elif element == TAB : self._write( r'\tab ' ) elif element == LINE : self._write( r'\line ' ) elif self.WriteCustomElement : self.WriteCustomElement( self, element ) else : raise Exception( 'Don\'t know how to handle %s' % element ) self._write( tag_suffix + closing ) def WriteRawCode( self, raw_elem ) : self._write( raw_elem.Data ) def WriteTextElement( self, text_elem ) : overrides = Settings() self._RendTextPropertySet ( text_elem.Properties, overrides ) self._RendShadingPropertySet( text_elem.Shading, overrides, 'ch' ) # write the wrapper and then let the custom handler have a go if overrides : self._write( '{%s ' % repr( overrides ) ) # if the data is just a string then we can now write it if isinstance( text_elem.Data, StringType ) : self._write( text_elem.Data or '' ) elif text_elem.Data == TAB : self._write( r'\tab ' ) else : self.WriteCustomElement( self, text_elem.Data ) if overrides : self._write( '}' ) def WriteInlineElement( self, inline_elem ) : overrides = Settings() self._RendTextPropertySet ( inline_elem.Properties, overrides ) 
self._RendShadingPropertySet( inline_elem.Shading, overrides, 'ch' ) # write the wrapper and then let the custom handler have a go if overrides : self._write( '{%s ' % repr( overrides ) ) for element in inline_elem : # if the data is just a string then we can now write it if isinstance( element, StringType ) : self._write( element ) elif isinstance( element, RawCode ) : self._write( element.Data ) elif element == TAB : self._write( r'\tab ' ) elif element == LINE : self._write( r'\line ' ) else : self.WriteCustomElement( self, element ) if overrides : self._write( '}' ) def WriteText( self, text ) : self._write( text or '' ) def WriteTableElement( self, table_elem ) : vmerge = [ False ] * table_elem.ColumnCount for height, cells in table_elem.Rows : # calculate the right hand edge of the cells taking into account the spans offset = table_elem.LeftOffset or 0 cellx = [] cell_idx = 0 for cell in cells : cellx.append( offset + sum( table_elem.ColumnWidths[ : cell_idx + cell.Span ] ) ) cell_idx += cell.Span self._write( r'{\trowd' ) settings = Settings() # the spec says that this value is mandatory and I think that 108 is the default value # so I'll take care of it here settings.append( table_elem.GapBetweenCells or 108, 'trgaph%s' ) settings.append( TableAlignmentMap[ table_elem.Alignment ] ) settings.append( height, 'trrh%s' ) settings.append( table_elem.LeftOffset, 'trleft%s' ) width = table_elem.LeftOffset or 0 for idx, cell in enumerate( cells ) : self._RendFramePropertySet ( cell.Frame, settings, 'cl' ) # cells don't have margins so I don't know why I was doing this # I think it might have an affect in some versions of some WPs. 
#self._RendMarginsPropertySet( cell.Margins, settings, 'cl' ) # if we are starting to merge or if this one is the first in what is # probably a series of merges then start the vertical merging if cell.StartVerticalMerge or (cell.VerticalMerge and not vmerge[ idx ]) : settings.append( 'clvmgf' ) vmerge[ idx ] = True elif cell.VerticalMerge : #..continuing a merge settings.append( 'clvmrg' ) else : #..no merging going on so make sure that it is off vmerge[ idx ] = False # for any cell in the next row that is covered by this span we # need to run off the vertical merging as we don't want them # merging up into this spanned cell for vmerge_idx in range( idx + 1, idx + cell.Span - 1 ) : vmerge[ vmerge_idx ] = False settings.append( CellAlignmentMap[ cell.Alignment ] ) settings.append( CellFlowMap[ cell.Flow ] ) # this terminates the definition of a cell and represents the right most edge of the cell from the left margin settings.append( cellx[ idx ], 'cellx%s' ) self._write( repr( settings ) ) for cell in cells : if len( cell ) : last_idx = len( cell ) - 1 for element_idx, element in enumerate( cell ) : # wrap plain strings in paragraph tags if isinstance( element, StringType ) : element = Paragraph( element ) # don't forget the prefix or else word crashes and does all sorts of strange things if element_idx == last_idx : self.WriteParagraphElement( element, tag_prefix=r'\intbl', tag_suffix='', opening='', closing='' ) else : self.WriteParagraphElement( element, tag_prefix=r'\intbl', opening='', closing='' ) self._write( r'\cell' ) else : self._write( r'\pard\intbl\cell' ) self._write( '\\row}\n' )
mit
spcui/tp-qemu
qemu/tests/sr_iov_boot_negative.py
7
1327
import logging from autotest.client.shared import error from virttest import env_process @error.context_aware def run(test, params, env): """ KVM boot with negative parameter test: 1) Try to boot VM with negative parameters. 2) Verify that qemu could handle the negative parameters. Check the negative message (optional) :param test: qemu test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ neg_msg = params.get("negative_msg") if params.get("start_vm") == "yes": raise error.TestError("Please set start_vm to no") params["start_vm"] = "yes" try: error.context("Try to boot VM with negative parameters", logging.info) case_fail = False env_process.preprocess_vm(test, params, env, params.get("main_vm")) case_fail = True except Exception, e: if neg_msg: error.context("Check qemu-qemu error message", logging.info) if neg_msg not in str(e): msg = "Could not find '%s' in error message '%s'" % ( neg_msg, e) raise error.TestFail(msg) logging.debug("Could not boot up vm, %s" % e) if case_fail: raise error.TestFail("Did not raise exception during vm boot up")
gpl-2.0
hybrideagle/django
tests/schema/models.py
237
4275
from django.apps.registry import Apps from django.db import models from django.utils.encoding import python_2_unicode_compatible # Because we want to test creation and deletion of these as separate things, # these models are all inserted into a separate Apps so the main test # runner doesn't migrate them. new_apps = Apps() class Author(models.Model): name = models.CharField(max_length=255) height = models.PositiveIntegerField(null=True, blank=True) class Meta: apps = new_apps class AuthorWithDefaultHeight(models.Model): name = models.CharField(max_length=255) height = models.PositiveIntegerField(null=True, blank=True, default=42) class Meta: apps = new_apps class AuthorWithEvenLongerName(models.Model): name = models.CharField(max_length=255) height = models.PositiveIntegerField(null=True, blank=True) class Meta: apps = new_apps class Book(models.Model): author = models.ForeignKey(Author, models.CASCADE) title = models.CharField(max_length=100, db_index=True) pub_date = models.DateTimeField() # tags = models.ManyToManyField("Tag", related_name="books") class Meta: apps = new_apps class BookWeak(models.Model): author = models.ForeignKey(Author, models.CASCADE, db_constraint=False) title = models.CharField(max_length=100, db_index=True) pub_date = models.DateTimeField() class Meta: apps = new_apps class BookWithLongName(models.Model): author_foreign_key_with_really_long_field_name = models.ForeignKey( AuthorWithEvenLongerName, models.CASCADE, ) class Meta: apps = new_apps class BookWithO2O(models.Model): author = models.OneToOneField(Author, models.CASCADE) title = models.CharField(max_length=100, db_index=True) pub_date = models.DateTimeField() class Meta: apps = new_apps db_table = "schema_book" class BookWithSlug(models.Model): author = models.ForeignKey(Author, models.CASCADE) title = models.CharField(max_length=100, db_index=True) pub_date = models.DateTimeField() slug = models.CharField(max_length=20, unique=True) class Meta: apps = new_apps db_table = 
"schema_book" class BookWithoutAuthor(models.Model): title = models.CharField(max_length=100, db_index=True) pub_date = models.DateTimeField() class Meta: apps = new_apps db_table = "schema_book" class BookForeignObj(models.Model): title = models.CharField(max_length=100, db_index=True) author_id = models.IntegerField() class Meta: apps = new_apps class IntegerPK(models.Model): i = models.IntegerField(primary_key=True) j = models.IntegerField(unique=True) class Meta: apps = new_apps db_table = "INTEGERPK" # uppercase to ensure proper quoting class Note(models.Model): info = models.TextField() class Meta: apps = new_apps class NoteRename(models.Model): detail_info = models.TextField() class Meta: apps = new_apps db_table = "schema_note" class Tag(models.Model): title = models.CharField(max_length=255) slug = models.SlugField(unique=True) class Meta: apps = new_apps class TagIndexed(models.Model): title = models.CharField(max_length=255) slug = models.SlugField(unique=True) class Meta: apps = new_apps index_together = [["slug", "title"]] class TagM2MTest(models.Model): title = models.CharField(max_length=255) slug = models.SlugField(unique=True) class Meta: apps = new_apps class TagUniqueRename(models.Model): title = models.CharField(max_length=255) slug2 = models.SlugField(unique=True) class Meta: apps = new_apps db_table = "schema_tag" # Based on tests/reserved_names/models.py @python_2_unicode_compatible class Thing(models.Model): when = models.CharField(max_length=1, primary_key=True) class Meta: db_table = 'drop' def __str__(self): return self.when class UniqueTest(models.Model): year = models.IntegerField() slug = models.SlugField(unique=False) class Meta: apps = new_apps unique_together = ["year", "slug"]
bsd-3-clause
redhat-cip/python-tripleo-wrapper
rdomhelper/ssh.py
1
11391
# -*- coding: utf-8 -*- # # Copyright (C) 2016 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import paramiko from paramiko import ssh_exception import io import logging import select import time LOG = logging.getLogger('__chainsaw__') class SshClient(object): """SSH client based on Paramiko. This class implements the following features: - run commands on a remote host - send file to a remote host - redirect connection to another ssh server so that every commands will be executed on the redirected host - send files - create remote files """ def __init__(self, hostname, user, key_filename=None, via_ip=None): """:param hostname: the host on which to connect :type hostname: str :param user: the user to use for the connection :type user: str :param key_filename: the private key path to use, by default it will use the system host keys :type key_filename: str :param redirect_to_host: the host on which to redirect, by default it will use the port 22 :type redirect_to_host: str """ assert hostname, 'hostname is defined.' assert user, 'user is defined.' 
self._hostname = hostname self._user = user self._key_filename = key_filename self.load_private_key(key_filename) self._client = paramiko.SSHClient() self._client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) self._via_ip = via_ip self._transport = None self._started = False self.description = 'not started yet' self._environment_filenames = [] def load_private_key(self, priv_key): """Register the SSH private key.""" with open(priv_key) as fd: self._private_key = paramiko.RSAKey.from_private_key(fd) def _get_transport_via_ip(self): exception = None for i in range(60): try: channel = self._client.get_transport().open_channel( 'direct-tcpip', (self._hostname, 22), (self._via_ip, 0)) except ssh_exception.ChannelException as exception: LOG.debug('%s creating the direct-tcip connections' % self.description) time.sleep(1) else: transport = paramiko.Transport(channel) transport.start_client() transport.auth_publickey(self._user, self._private_key) return transport raise exception def _get_transport(self): if self._via_ip: transport = self._get_transport_via_ip() else: transport = self._client.get_transport() transport.set_keepalive(10) return transport def start(self): """Start the ssh client and connect to the host. It will wait until the ssh service is available during 90 seconds. If it doesn't succed to connect then the function will raise an SSHException. 
""" if self._via_ip: connect_to = self._via_ip self.description = '[%s@%s via %s]' % (self._user, self._hostname, self._via_ip) else: connect_to = self._hostname self.description = '[%s@%s]' % (self._user, self._hostname) for i in range(60): try: self._client.connect( connect_to, username=self._user, allow_agent=True, key_filename=self._key_filename) # NOTE(Gonéri): TypeError is in the list because of # https://github.com/paramiko/paramiko/issues/615 self._transport = self._get_transport() except (OSError, TypeError, ssh_exception.SSHException, ssh_exception.NoValidConnectionsError) as e: LOG.info('%s waiting for %s' % (self.description, connect_to)) LOG.debug("exception: '%s'" % str(e)) time.sleep(1) else: LOG.debug('%s connected' % self.description) self._started = True return _error = ("unable to connect to ssh service on '%s'" % self._hostname) LOG.error(_error) raise ssh_exception.SSHException(_error) def _check_started(self): if not self._started: _error = "ssh client not started, please start the client" LOG.error(_error) raise ssh_exception.SSHException(_error) def stop(self): """Close the ssh connection.""" self._started = False self._client.close() def run(self, cmd, sudo=False, ignore_error=False, success_status=(0,), error_callback=None, custom_log=None): """Run a command on the remote host. The command is run on the remote host, if there is a redirected host then the command will be run on that redirected host. See __init__. :param cmd: the command to run :type cmd: str :param sudo: True if the command should be run with sudo, this parameter disable the use of environment files. :type sudo: str :param success_status: the list of the possible success status :type success_status: list :param error_callback: if provided, the callback to call in case of a failure. it will be called with two args, the output of the command and the returned error code. 
:return: the tuple (output of the command, returned code) :rtype: tuple :param custom_log: a optional string to record in the log instead of the command. This is useful for example if you want to hide a password. :type custom_log: str """ self._check_started() cmd_output = io.StringIO() channel = self._get_channel() if sudo: cmd = "sudo %s" % cmd else: for filename in self._environment_filenames: cmd = '. %s; %s' % (filename, cmd) if not custom_log: custom_log = cmd LOG.info("%s run '%s'" % (self.description, custom_log)) channel.exec_command(cmd) while True: if channel.exit_status_ready(): break rl, _, _ = select.select([channel], [], [], 30) if rl: received = channel.recv(1024).decode('UTF-8', 'ignore').strip() if received: LOG.debug(received) cmd_output.write(received) cmd_output = cmd_output.getvalue() exit_status = channel.exit_status if ignore_error or channel.exit_status in success_status: return cmd_output, channel.exit_status elif error_callback: return error_callback(cmd_output, exit_status) else: _error = ("%s command %s has failed with, rc='%s'" % (self.description, custom_log, exit_status)) LOG.error(_error) raise ssh_exception.SSHException(_error) def _get_channel(self): """Returns a channel according to if there is a redirection to do or not. """ channel = self._transport.open_session() channel.set_combine_stderr(True) channel.get_pty() return channel def send_file(self, local_path, remote_path): """Send a file to the remote host. :param local_path: the local path of the file :type local_path: str :param remote_path: the remote path of the file :type remote_path: str :return: the file attributes :rtype: paramiko.sftp_attr.SFTPAttributes """ self._check_started() sftp = paramiko.SFTPClient.from_transport(self._transport) return sftp.put(local_path, remote_path) def create_file(self, path, content, mode='w'): """Create a file with a content. :param path: the path of the file. 
:type path: str :param content: the content of the file :type content: str :param mode: the mode of the file while opening it :type mode: str """ self._check_started() sftp = paramiko.SFTPClient.from_transport(self._transport) with sftp.open(path, mode) as remote_file: remote_file.write(content) remote_file.flush() def info(self): return {'hostname': self._hostname, 'user': self._user, 'key_filename': self._key_filename} def add_environment_file(self, filename): self._environment_filenames.append(filename) class PoolSshClient(object): def __init__(self): self._ssh_clients = {} def build_ssh_client(self, hostname, user, key_filename=None, via_ip=None): _ssh_client = SshClient(hostname, user, key_filename, via_ip) _ssh_client.start() self._ssh_clients[user] = _ssh_client def add_ssh_client(self, user, ssh_client): self._ssh_clients[user] = ssh_client def del_ssh_client(self, user): self._check_ssh_client(user) del self._ssh_clients[user] def get_client(self, user): self._check_ssh_client(user) return self._ssh_clients[user] def _check_ssh_client(self, user): if user not in self._ssh_clients.keys(): _error = "ssh client for user %s not existing" % user LOG.error(_error) raise ssh_exception.SSHException(_error) def run(self, user, cmd, sudo=False, ignore_error=False, success_status=(0,), error_callback=None, custom_log=None): self._check_ssh_client(user) return self._ssh_clients[user].run( cmd, sudo=sudo, ignore_error=ignore_error, success_status=success_status, error_callback=error_callback, custom_log=custom_log) def send_file(self, user, local_path, remote_path): self._check_ssh_client(user) return self._ssh_clients[user].send_file(local_path, remote_path) def create_file(self, user, path, content, mode='w'): self._check_ssh_client(user) return self._ssh_clients[user].create_file(path, content, mode) def stop_all(self): for ssh_client in self._ssh_clients.values(): ssh_client.stop() def add_environment_file(self, user, filename): self._check_ssh_client(user) 
self._ssh_clients[user].add_environment_file(filename)
apache-2.0
johndpope/tensorflow
tensorflow/python/kernel_tests/distributions/dirichlet_multinomial_test.py
15
18254
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.distributions import dirichlet_multinomial from tensorflow.python.platform import test ds = dirichlet_multinomial class DirichletMultinomialTest(test.TestCase): def setUp(self): self._rng = np.random.RandomState(42) def testSimpleShapes(self): with self.test_session(): alpha = np.random.rand(3) dist = ds.DirichletMultinomial(1., alpha) self.assertEqual(3, dist.event_shape_tensor().eval()) self.assertAllEqual([], dist.batch_shape_tensor().eval()) self.assertEqual(tensor_shape.TensorShape([3]), dist.event_shape) self.assertEqual(tensor_shape.TensorShape([]), dist.batch_shape) def testComplexShapes(self): with self.test_session(): alpha = np.random.rand(3, 2, 2) n = [[3., 2], [4, 5], [6, 7]] dist = ds.DirichletMultinomial(n, alpha) self.assertEqual(2, dist.event_shape_tensor().eval()) self.assertAllEqual([3, 2], dist.batch_shape_tensor().eval()) self.assertEqual(tensor_shape.TensorShape([2]), dist.event_shape) self.assertEqual(tensor_shape.TensorShape([3, 2]), 
dist.batch_shape) def testNproperty(self): alpha = [[1., 2, 3]] n = [[5.]] with self.test_session(): dist = ds.DirichletMultinomial(n, alpha) self.assertEqual([1, 1], dist.total_count.get_shape()) self.assertAllClose(n, dist.total_count.eval()) def testAlphaProperty(self): alpha = [[1., 2, 3]] with self.test_session(): dist = ds.DirichletMultinomial(1, alpha) self.assertEqual([1, 3], dist.concentration.get_shape()) self.assertAllClose(alpha, dist.concentration.eval()) def testPmfNandCountsAgree(self): alpha = [[1., 2, 3]] n = [[5.]] with self.test_session(): dist = ds.DirichletMultinomial(n, alpha, validate_args=True) dist.prob([2., 3, 0]).eval() dist.prob([3., 0, 2]).eval() with self.assertRaisesOpError("counts must be non-negative"): dist.prob([-1., 4, 2]).eval() with self.assertRaisesOpError( "counts last-dimension must sum to `self.total_count`"): dist.prob([3., 3, 0]).eval() def testPmfNonIntegerCounts(self): alpha = [[1., 2, 3]] n = [[5.]] with self.test_session(): dist = ds.DirichletMultinomial(n, alpha, validate_args=True) dist.prob([2., 3, 0]).eval() dist.prob([3., 0, 2]).eval() dist.prob([3.0, 0, 2.0]).eval() # Both equality and integer checking fail. placeholder = array_ops.placeholder(dtypes.float32) with self.assertRaisesOpError( "counts cannot contain fractional components"): dist.prob(placeholder).eval(feed_dict={placeholder: [1.0, 2.5, 1.5]}) dist = ds.DirichletMultinomial(n, alpha, validate_args=False) dist.prob([1., 2., 3.]).eval() # Non-integer arguments work. dist.prob([1.0, 2.5, 1.5]).eval() def testPmfBothZeroBatches(self): # The probabilities of one vote falling into class k is the mean for class # k. with self.test_session(): # Both zero-batches. 
No broadcast alpha = [1., 2] counts = [1., 0] dist = ds.DirichletMultinomial(1., alpha) pmf = dist.prob(counts) self.assertAllClose(1 / 3., pmf.eval()) self.assertEqual((), pmf.get_shape()) def testPmfBothZeroBatchesNontrivialN(self): # The probabilities of one vote falling into class k is the mean for class # k. with self.test_session(): # Both zero-batches. No broadcast alpha = [1., 2] counts = [3., 2] dist = ds.DirichletMultinomial(5., alpha) pmf = dist.prob(counts) self.assertAllClose(1 / 7., pmf.eval()) self.assertEqual((), pmf.get_shape()) def testPmfBothZeroBatchesMultidimensionalN(self): # The probabilities of one vote falling into class k is the mean for class # k. with self.test_session(): alpha = [1., 2] counts = [3., 2] n = np.full([4, 3], 5., dtype=np.float32) dist = ds.DirichletMultinomial(n, alpha) pmf = dist.prob(counts) self.assertAllClose([[1 / 7., 1 / 7., 1 / 7.]] * 4, pmf.eval()) self.assertEqual((4, 3), pmf.get_shape()) def testPmfAlphaStretchedInBroadcastWhenSameRank(self): # The probabilities of one vote falling into class k is the mean for class # k. with self.test_session(): alpha = [[1., 2]] counts = [[1., 0], [0., 1]] dist = ds.DirichletMultinomial([1.], alpha) pmf = dist.prob(counts) self.assertAllClose([1 / 3., 2 / 3.], pmf.eval()) self.assertAllEqual([2], pmf.get_shape()) def testPmfAlphaStretchedInBroadcastWhenLowerRank(self): # The probabilities of one vote falling into class k is the mean for class # k. with self.test_session(): alpha = [1., 2] counts = [[1., 0], [0., 1]] pmf = ds.DirichletMultinomial(1., alpha).prob(counts) self.assertAllClose([1 / 3., 2 / 3.], pmf.eval()) self.assertAllEqual([2], pmf.get_shape()) def testPmfCountsStretchedInBroadcastWhenSameRank(self): # The probabilities of one vote falling into class k is the mean for class # k. 
with self.test_session(): alpha = [[1., 2], [2., 3]] counts = [[1., 0]] pmf = ds.DirichletMultinomial([1., 1.], alpha).prob(counts) self.assertAllClose([1 / 3., 2 / 5.], pmf.eval()) self.assertAllEqual([2], pmf.get_shape()) def testPmfCountsStretchedInBroadcastWhenLowerRank(self): # The probabilities of one vote falling into class k is the mean for class # k. with self.test_session(): alpha = [[1., 2], [2., 3]] counts = [1., 0] pmf = ds.DirichletMultinomial(1., alpha).prob(counts) self.assertAllClose([1 / 3., 2 / 5.], pmf.eval()) self.assertAllEqual([2], pmf.get_shape()) def testPmfForOneVoteIsTheMeanWithOneRecordInput(self): # The probabilities of one vote falling into class k is the mean for class # k. alpha = [1., 2, 3] with self.test_session(): for class_num in range(3): counts = np.zeros([3], dtype=np.float32) counts[class_num] = 1 dist = ds.DirichletMultinomial(1., alpha) mean = dist.mean().eval() pmf = dist.prob(counts).eval() self.assertAllClose(mean[class_num], pmf) self.assertAllEqual([3], mean.shape) self.assertAllEqual([], pmf.shape) def testMeanDoubleTwoVotes(self): # The probabilities of two votes falling into class k for # DirichletMultinomial(2, alpha) is twice as much as the probability of one # vote falling into class k for DirichletMultinomial(1, alpha) alpha = [1., 2, 3] with self.test_session(): for class_num in range(3): counts_one = np.zeros([3], dtype=np.float32) counts_one[class_num] = 1. counts_two = np.zeros([3], dtype=np.float32) counts_two[class_num] = 2 dist1 = ds.DirichletMultinomial(1., alpha) dist2 = ds.DirichletMultinomial(2., alpha) mean1 = dist1.mean().eval() mean2 = dist2.mean().eval() self.assertAllClose(mean2[class_num], 2 * mean1[class_num]) self.assertAllEqual([3], mean1.shape) def testCovarianceFromSampling(self): # We will test mean, cov, var, stddev on a DirichletMultinomial constructed # via broadcast between alpha, n. 
alpha = np.array([[1., 2, 3], [2.5, 4, 0.01]], dtype=np.float32) # Ideally we'd be able to test broadcasting but, the multinomial sampler # doesn't support different total counts. n = np.float32(5) with self.test_session() as sess: # batch_shape=[2], event_shape=[3] dist = ds.DirichletMultinomial(n, alpha) x = dist.sample(int(250e3), seed=1) sample_mean = math_ops.reduce_mean(x, 0) x_centered = x - sample_mean[array_ops.newaxis, ...] sample_cov = math_ops.reduce_mean(math_ops.matmul( x_centered[..., array_ops.newaxis], x_centered[..., array_ops.newaxis, :]), 0) sample_var = array_ops.matrix_diag_part(sample_cov) sample_stddev = math_ops.sqrt(sample_var) [ sample_mean_, sample_cov_, sample_var_, sample_stddev_, analytic_mean, analytic_cov, analytic_var, analytic_stddev, ] = sess.run([ sample_mean, sample_cov, sample_var, sample_stddev, dist.mean(), dist.covariance(), dist.variance(), dist.stddev(), ]) self.assertAllClose(sample_mean_, analytic_mean, atol=0., rtol=0.04) self.assertAllClose(sample_cov_, analytic_cov, atol=0., rtol=0.05) self.assertAllClose(sample_var_, analytic_var, atol=0., rtol=0.03) self.assertAllClose(sample_stddev_, analytic_stddev, atol=0., rtol=0.02) def testCovariance(self): # Shape [2] alpha = [1., 2] ns = [2., 3., 4., 5.] alpha_0 = np.sum(alpha) # Diagonal entries are of the form: # Var(X_i) = n * alpha_i / alpha_sum * (1 - alpha_i / alpha_sum) * # (alpha_sum + n) / (alpha_sum + 1) variance_entry = lambda a, a_sum: a / a_sum * (1 - a / a_sum) # Off diagonal entries are of the form: # Cov(X_i, X_j) = -n * alpha_i * alpha_j / (alpha_sum ** 2) * # (alpha_sum + n) / (alpha_sum + 1) covariance_entry = lambda a, b, a_sum: -a * b / a_sum**2 # Shape [2, 2]. shared_matrix = np.array([[ variance_entry(alpha[0], alpha_0), covariance_entry(alpha[0], alpha[1], alpha_0) ], [ covariance_entry(alpha[1], alpha[0], alpha_0), variance_entry(alpha[1], alpha_0) ]]) with self.test_session(): for n in ns: # n is shape [] and alpha is shape [2]. 
dist = ds.DirichletMultinomial(n, alpha) covariance = dist.covariance() expected_covariance = n * (n + alpha_0) / (1 + alpha_0) * shared_matrix self.assertEqual([2, 2], covariance.get_shape()) self.assertAllClose(expected_covariance, covariance.eval()) def testCovarianceNAlphaBroadcast(self): alpha_v = [1., 2, 3] alpha_0 = 6. # Shape [4, 3] alpha = np.array(4 * [alpha_v], dtype=np.float32) # Shape [4, 1] ns = np.array([[2.], [3.], [4.], [5.]], dtype=np.float32) variance_entry = lambda a, a_sum: a / a_sum * (1 - a / a_sum) covariance_entry = lambda a, b, a_sum: -a * b / a_sum**2 # Shape [4, 3, 3] shared_matrix = np.array( 4 * [[[ variance_entry(alpha_v[0], alpha_0), covariance_entry(alpha_v[0], alpha_v[1], alpha_0), covariance_entry(alpha_v[0], alpha_v[2], alpha_0) ], [ covariance_entry(alpha_v[1], alpha_v[0], alpha_0), variance_entry(alpha_v[1], alpha_0), covariance_entry(alpha_v[1], alpha_v[2], alpha_0) ], [ covariance_entry(alpha_v[2], alpha_v[0], alpha_0), covariance_entry(alpha_v[2], alpha_v[1], alpha_0), variance_entry(alpha_v[2], alpha_0) ]]], dtype=np.float32) with self.test_session(): # ns is shape [4, 1], and alpha is shape [4, 3]. 
dist = ds.DirichletMultinomial(ns, alpha) covariance = dist.covariance() expected_covariance = shared_matrix * ( ns * (ns + alpha_0) / (1 + alpha_0))[..., array_ops.newaxis] self.assertEqual([4, 3, 3], covariance.get_shape()) self.assertAllClose(expected_covariance, covariance.eval()) def testCovarianceMultidimensional(self): alpha = np.random.rand(3, 5, 4).astype(np.float32) alpha2 = np.random.rand(6, 3, 3).astype(np.float32) ns = np.random.randint(low=1, high=11, size=[3, 5, 1]).astype(np.float32) ns2 = np.random.randint(low=1, high=11, size=[6, 1, 1]).astype(np.float32) with self.test_session(): dist = ds.DirichletMultinomial(ns, alpha) dist2 = ds.DirichletMultinomial(ns2, alpha2) covariance = dist.covariance() covariance2 = dist2.covariance() self.assertEqual([3, 5, 4, 4], covariance.get_shape()) self.assertEqual([6, 3, 3, 3], covariance2.get_shape()) def testZeroCountsResultsInPmfEqualToOne(self): # There is only one way for zero items to be selected, and this happens with # probability 1. alpha = [5, 0.5] counts = [0., 0] with self.test_session(): dist = ds.DirichletMultinomial(0., alpha) pmf = dist.prob(counts) self.assertAllClose(1.0, pmf.eval()) self.assertEqual((), pmf.get_shape()) def testLargeTauGivesPreciseProbabilities(self): # If tau is large, we are doing coin flips with probability mu. mu = np.array([0.1, 0.1, 0.8], dtype=np.float32) tau = np.array([100.], dtype=np.float32) alpha = tau * mu # One (three sided) coin flip. Prob[coin 3] = 0.8. # Note that since it was one flip, value of tau didn't matter. counts = [0., 0, 1] with self.test_session(): dist = ds.DirichletMultinomial(1., alpha) pmf = dist.prob(counts) self.assertAllClose(0.8, pmf.eval(), atol=1e-4) self.assertEqual((), pmf.get_shape()) # Two (three sided) coin flips. Prob[coin 3] = 0.8. 
counts = [0., 0, 2] with self.test_session(): dist = ds.DirichletMultinomial(2., alpha) pmf = dist.prob(counts) self.assertAllClose(0.8**2, pmf.eval(), atol=1e-2) self.assertEqual((), pmf.get_shape()) # Three (three sided) coin flips. counts = [1., 0, 2] with self.test_session(): dist = ds.DirichletMultinomial(3., alpha) pmf = dist.prob(counts) self.assertAllClose(3 * 0.1 * 0.8 * 0.8, pmf.eval(), atol=1e-2) self.assertEqual((), pmf.get_shape()) def testSmallTauPrefersCorrelatedResults(self): # If tau is small, then correlation between draws is large, so draws that # are both of the same class are more likely. mu = np.array([0.5, 0.5], dtype=np.float32) tau = np.array([0.1], dtype=np.float32) alpha = tau * mu # If there is only one draw, it is still a coin flip, even with small tau. counts = [1., 0] with self.test_session(): dist = ds.DirichletMultinomial(1., alpha) pmf = dist.prob(counts) self.assertAllClose(0.5, pmf.eval()) self.assertEqual((), pmf.get_shape()) # If there are two draws, it is much more likely that they are the same. counts_same = [2., 0] counts_different = [1, 1.] with self.test_session(): dist = ds.DirichletMultinomial(2., alpha) pmf_same = dist.prob(counts_same) pmf_different = dist.prob(counts_different) self.assertLess(5 * pmf_different.eval(), pmf_same.eval()) self.assertEqual((), pmf_same.get_shape()) def testNonStrictTurnsOffAllChecks(self): # Make totally invalid input. with self.test_session(): alpha = [[-1., 2]] # alpha should be positive. counts = [[1., 0], [0., -1]] # counts should be non-negative. n = [-5.3] # n should be a non negative integer equal to counts.sum. dist = ds.DirichletMultinomial(n, alpha, validate_args=False) dist.prob(counts).eval() # Should not raise. def testSampleUnbiasedNonScalarBatch(self): with self.test_session() as sess: dist = ds.DirichletMultinomial( total_count=5., concentration=1. + 2. 
* self._rng.rand(4, 3, 2).astype(np.float32)) n = int(3e3) x = dist.sample(n, seed=0) sample_mean = math_ops.reduce_mean(x, 0) # Cyclically rotate event dims left. x_centered = array_ops.transpose(x - sample_mean, [1, 2, 3, 0]) sample_covariance = math_ops.matmul( x_centered, x_centered, adjoint_b=True) / n [ sample_mean_, sample_covariance_, actual_mean_, actual_covariance_, ] = sess.run([ sample_mean, sample_covariance, dist.mean(), dist.covariance(), ]) self.assertAllEqual([4, 3, 2], sample_mean.get_shape()) self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.15) self.assertAllEqual([4, 3, 2, 2], sample_covariance.get_shape()) self.assertAllClose( actual_covariance_, sample_covariance_, atol=0., rtol=0.20) def testSampleUnbiasedScalarBatch(self): with self.test_session() as sess: dist = ds.DirichletMultinomial( total_count=5., concentration=1. + 2. * self._rng.rand(4).astype(np.float32)) n = int(5e3) x = dist.sample(n, seed=0) sample_mean = math_ops.reduce_mean(x, 0) x_centered = x - sample_mean # Already transposed to [n, 2]. sample_covariance = math_ops.matmul( x_centered, x_centered, adjoint_a=True) / n [ sample_mean_, sample_covariance_, actual_mean_, actual_covariance_, ] = sess.run([ sample_mean, sample_covariance, dist.mean(), dist.covariance(), ]) self.assertAllEqual([4], sample_mean.get_shape()) self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.05) self.assertAllEqual([4, 4], sample_covariance.get_shape()) self.assertAllClose( actual_covariance_, sample_covariance_, atol=0., rtol=0.15) if __name__ == "__main__": test.main()
apache-2.0
bigdatauniversity/edx-platform
lms/djangoapps/discussion_api/tests/test_views.py
14
52323
""" Tests for Discussion API views """ from datetime import datetime import json from urlparse import urlparse import ddt import httpretty import mock from pytz import UTC from django.core.urlresolvers import reverse from rest_framework.parsers import JSONParser from rest_framework.test import APIClient from xmodule.modulestore import ModuleStoreEnum from xmodule.modulestore.django import modulestore from common.test.utils import disable_signal from discussion_api import api from discussion_api.tests.utils import ( CommentsServiceMockMixin, make_minimal_cs_comment, make_minimal_cs_thread, ) from student.tests.factories import CourseEnrollmentFactory, UserFactory from util.testing import UrlResetMixin, PatchMediaTypeMixin from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory, check_mongo_calls, ItemFactory class DiscussionAPIViewTestMixin(CommentsServiceMockMixin, UrlResetMixin): """ Mixin for common code in tests of Discussion API views. This includes creation of common structures (e.g. a course, user, and enrollment), logging in the test client, utility functions, and a test case for unauthenticated requests. Subclasses must set self.url in their setUp methods. 
""" client_class = APIClient @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True}) def setUp(self): super(DiscussionAPIViewTestMixin, self).setUp() self.maxDiff = None # pylint: disable=invalid-name self.course = CourseFactory.create( org="x", course="y", run="z", start=datetime.now(UTC), discussion_topics={"Test Topic": {"id": "test_topic"}} ) self.password = "password" self.user = UserFactory.create(password=self.password) CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id) self.client.login(username=self.user.username, password=self.password) def assert_response_correct(self, response, expected_status, expected_content): """ Assert that the response has the given status code and parsed content """ self.assertEqual(response.status_code, expected_status) parsed_content = json.loads(response.content) self.assertEqual(parsed_content, expected_content) def register_thread(self, overrides=None): """ Create cs_thread with minimal fields and register response """ cs_thread = make_minimal_cs_thread({ "id": "test_thread", "course_id": unicode(self.course.id), "commentable_id": "original_topic", "username": self.user.username, "user_id": str(self.user.id), "thread_type": "discussion", "title": "Original Title", "body": "Original body", }) cs_thread.update(overrides or {}) self.register_get_thread_response(cs_thread) self.register_put_thread_response(cs_thread) def register_comment(self, overrides=None): """ Create cs_comment with minimal fields and register response """ cs_comment = make_minimal_cs_comment({ "id": "test_comment", "course_id": unicode(self.course.id), "thread_id": "test_thread", "username": self.user.username, "user_id": str(self.user.id), "body": "Original body", }) cs_comment.update(overrides or {}) self.register_get_comment_response(cs_comment) self.register_put_comment_response(cs_comment) self.register_post_comment_response(cs_comment, thread_id="test_thread") def test_not_authenticated(self): 
self.client.logout() response = self.client.get(self.url) self.assert_response_correct( response, 401, {"developer_message": "Authentication credentials were not provided."} ) def test_inactive(self): self.user.is_active = False self.test_basic() @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True}) class CourseViewTest(DiscussionAPIViewTestMixin, ModuleStoreTestCase): """Tests for CourseView""" def setUp(self): super(CourseViewTest, self).setUp() self.url = reverse("discussion_course", kwargs={"course_id": unicode(self.course.id)}) def test_404(self): response = self.client.get( reverse("course_topics", kwargs={"course_id": "non/existent/course"}) ) self.assert_response_correct( response, 404, {"developer_message": "Course not found."} ) def test_basic(self): response = self.client.get(self.url) self.assert_response_correct( response, 200, { "id": unicode(self.course.id), "blackouts": [], "thread_list_url": "http://testserver/api/discussion/v1/threads/?course_id=x%2Fy%2Fz", "following_thread_list_url": ( "http://testserver/api/discussion/v1/threads/?course_id=x%2Fy%2Fz&following=True" ), "topics_url": "http://testserver/api/discussion/v1/course_topics/x/y/z", } ) @ddt.ddt @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True}) class CourseTopicsViewTest(DiscussionAPIViewTestMixin, ModuleStoreTestCase): """Tests for CourseTopicsView""" def setUp(self): super(CourseTopicsViewTest, self).setUp() self.url = reverse("course_topics", kwargs={"course_id": unicode(self.course.id)}) def create_course(self, modules_count, module_store, topics): """ Create a course in a specified module store with discussion module and topics """ course = CourseFactory.create( org="a", course="b", run="c", start=datetime.now(UTC), default_store=module_store, discussion_topics=topics ) CourseEnrollmentFactory.create(user=self.user, course_id=course.id) course_url = reverse("course_topics", kwargs={"course_id": 
unicode(course.id)}) # add some discussion modules for i in range(modules_count): ItemFactory.create( parent_location=course.location, category='discussion', discussion_id='id_module_{}'.format(i), discussion_category='Category {}'.format(i), discussion_target='Discussion {}'.format(i), publish_item=False, ) return course_url def test_404(self): response = self.client.get( reverse("course_topics", kwargs={"course_id": "non/existent/course"}) ) self.assert_response_correct( response, 404, {"developer_message": "Course not found."} ) def test_basic(self): response = self.client.get(self.url) self.assert_response_correct( response, 200, { "courseware_topics": [], "non_courseware_topics": [{ "id": "test_topic", "name": "Test Topic", "children": [], "thread_list_url": "http://testserver/api/discussion/v1/threads/?course_id=x%2Fy%2Fz&topic_id=test_topic", }], } ) @ddt.data( (2, ModuleStoreEnum.Type.mongo, 2, {"Test Topic 1": {"id": "test_topic_1"}}), (2, ModuleStoreEnum.Type.mongo, 2, {"Test Topic 1": {"id": "test_topic_1"}, "Test Topic 2": {"id": "test_topic_2"}}), (2, ModuleStoreEnum.Type.split, 3, {"Test Topic 1": {"id": "test_topic_1"}}), (2, ModuleStoreEnum.Type.split, 3, {"Test Topic 1": {"id": "test_topic_1"}, "Test Topic 2": {"id": "test_topic_2"}}), (10, ModuleStoreEnum.Type.split, 3, {"Test Topic 1": {"id": "test_topic_1"}}), ) @ddt.unpack def test_bulk_response(self, modules_count, module_store, mongo_calls, topics): course_url = self.create_course(modules_count, module_store, topics) with check_mongo_calls(mongo_calls): with modulestore().default_store(module_store): self.client.get(course_url) @ddt.ddt @httpretty.activate @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True}) class ThreadViewSetListTest(DiscussionAPIViewTestMixin, ModuleStoreTestCase): """Tests for ThreadViewSet list""" def setUp(self): super(ThreadViewSetListTest, self).setUp() self.author = UserFactory.create() self.url = reverse("thread-list") def 
test_course_id_missing(self): response = self.client.get(self.url) self.assert_response_correct( response, 400, {"field_errors": {"course_id": {"developer_message": "This field is required."}}} ) def test_404(self): response = self.client.get(self.url, {"course_id": unicode("non/existent/course")}) self.assert_response_correct( response, 404, {"developer_message": "Course not found."} ) def test_basic(self): self.register_get_user_response(self.user, upvoted_ids=["test_thread"]) source_threads = [{ "type": "thread", "id": "test_thread", "course_id": unicode(self.course.id), "commentable_id": "test_topic", "group_id": None, "user_id": str(self.author.id), "username": self.author.username, "anonymous": False, "anonymous_to_peers": False, "created_at": "2015-04-28T00:00:00Z", "updated_at": "2015-04-28T11:11:11Z", "thread_type": "discussion", "title": "Test Title", "body": "Test body", "pinned": False, "closed": False, "abuse_flaggers": [], "votes": {"up_count": 4}, "comments_count": 5, "unread_comments_count": 3, "read": False, "endorsed": False }] expected_threads = [{ "id": "test_thread", "course_id": unicode(self.course.id), "topic_id": "test_topic", "group_id": None, "group_name": None, "author": self.author.username, "author_label": None, "created_at": "2015-04-28T00:00:00Z", "updated_at": "2015-04-28T11:11:11Z", "type": "discussion", "title": "Test Title", "raw_body": "Test body", "rendered_body": "<p>Test body</p>", "pinned": False, "closed": False, "following": False, "abuse_flagged": False, "voted": True, "vote_count": 4, "comment_count": 6, "unread_comment_count": 4, "comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_thread", "endorsed_comment_list_url": None, "non_endorsed_comment_list_url": None, "editable_fields": ["abuse_flagged", "following", "read", "voted"], "read": False, "has_endorsed": False, }] self.register_get_threads_response(source_threads, page=1, num_pages=2) response = self.client.get(self.url, {"course_id": 
unicode(self.course.id), "following": ""}) self.assert_response_correct( response, 200, { "results": expected_threads, "next": "http://testserver/api/discussion/v1/threads/?course_id=x%2Fy%2Fz&page=2", "previous": None, "text_search_rewrite": None, } ) self.assert_last_query_params({ "user_id": [unicode(self.user.id)], "course_id": [unicode(self.course.id)], "sort_key": ["activity"], "sort_order": ["desc"], "page": ["1"], "per_page": ["10"], "recursive": ["False"], }) @ddt.data("unread", "unanswered") def test_view_query(self, query): threads = [make_minimal_cs_thread()] self.register_get_user_response(self.user) self.register_get_threads_response(threads, page=1, num_pages=1) self.client.get( self.url, { "course_id": unicode(self.course.id), "view": query, } ) self.assert_last_query_params({ "user_id": [unicode(self.user.id)], "course_id": [unicode(self.course.id)], "sort_key": ["activity"], "sort_order": ["desc"], "recursive": ["False"], "page": ["1"], "per_page": ["10"], query: ["true"], }) def test_pagination(self): self.register_get_user_response(self.user) self.register_get_threads_response([], page=1, num_pages=1) response = self.client.get( self.url, {"course_id": unicode(self.course.id), "page": "18", "page_size": "4"} ) self.assert_response_correct( response, 404, {"developer_message": "Not found."} ) self.assert_last_query_params({ "user_id": [unicode(self.user.id)], "course_id": [unicode(self.course.id)], "sort_key": ["activity"], "sort_order": ["desc"], "page": ["18"], "per_page": ["4"], "recursive": ["False"], }) def test_text_search(self): self.register_get_user_response(self.user) self.register_get_threads_search_response([], None) response = self.client.get( self.url, {"course_id": unicode(self.course.id), "text_search": "test search string"} ) self.assert_response_correct( response, 200, {"results": [], "next": None, "previous": None, "text_search_rewrite": None} ) self.assert_last_query_params({ "user_id": [unicode(self.user.id)], "course_id": 
[unicode(self.course.id)], "sort_key": ["activity"], "sort_order": ["desc"], "page": ["1"], "per_page": ["10"], "recursive": ["False"], "text": ["test search string"], }) @ddt.data(True, "true", "1") def test_following_true(self, following): self.register_get_user_response(self.user) self.register_subscribed_threads_response(self.user, [], page=1, num_pages=1) response = self.client.get( self.url, { "course_id": unicode(self.course.id), "following": following, } ) self.assert_response_correct( response, 200, {"results": [], "next": None, "previous": None, "text_search_rewrite": None} ) self.assertEqual( urlparse(httpretty.last_request().path).path, "/api/v1/users/{}/subscribed_threads".format(self.user.id) ) @ddt.data(False, "false", "0") def test_following_false(self, following): response = self.client.get( self.url, { "course_id": unicode(self.course.id), "following": following, } ) self.assert_response_correct( response, 400, {"field_errors": { "following": {"developer_message": "The value of the 'following' parameter must be true."} }} ) def test_following_error(self): response = self.client.get( self.url, { "course_id": unicode(self.course.id), "following": "invalid-boolean", } ) self.assert_response_correct( response, 400, {"field_errors": { "following": {"developer_message": "Invalid Boolean Value."} }} ) @ddt.data( ("last_activity_at", "activity"), ("comment_count", "comments"), ("vote_count", "votes") ) @ddt.unpack def test_order_by(self, http_query, cc_query): """ Tests the order_by parameter Arguments: http_query (str): Query string sent in the http request cc_query (str): Query string used for the comments client service """ threads = [make_minimal_cs_thread()] self.register_get_user_response(self.user) self.register_get_threads_response(threads, page=1, num_pages=1) self.client.get( self.url, { "course_id": unicode(self.course.id), "order_by": http_query, } ) self.assert_last_query_params({ "user_id": [unicode(self.user.id)], "course_id": 
[unicode(self.course.id)], "sort_order": ["desc"], "recursive": ["False"], "page": ["1"], "per_page": ["10"], "sort_key": [cc_query], }) @ddt.data("asc", "desc") def test_order_direction(self, query): threads = [make_minimal_cs_thread()] self.register_get_user_response(self.user) self.register_get_threads_response(threads, page=1, num_pages=1) self.client.get( self.url, { "course_id": unicode(self.course.id), "order_direction": query, } ) self.assert_last_query_params({ "user_id": [unicode(self.user.id)], "course_id": [unicode(self.course.id)], "sort_key": ["activity"], "recursive": ["False"], "page": ["1"], "per_page": ["10"], "sort_order": [query], }) @httpretty.activate @disable_signal(api, 'thread_created') @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True}) class ThreadViewSetCreateTest(DiscussionAPIViewTestMixin, ModuleStoreTestCase): """Tests for ThreadViewSet create""" def setUp(self): super(ThreadViewSetCreateTest, self).setUp() self.url = reverse("thread-list") def test_basic(self): self.register_get_user_response(self.user) cs_thread = make_minimal_cs_thread({ "id": "test_thread", "username": self.user.username, "created_at": "2015-05-19T00:00:00Z", "updated_at": "2015-05-19T00:00:00Z", }) self.register_post_thread_response(cs_thread) request_data = { "course_id": unicode(self.course.id), "topic_id": "test_topic", "type": "discussion", "title": "Test Title", "raw_body": "Test body", } expected_response_data = { "id": "test_thread", "course_id": unicode(self.course.id), "topic_id": "test_topic", "group_id": None, "group_name": None, "author": self.user.username, "author_label": None, "created_at": "2015-05-19T00:00:00Z", "updated_at": "2015-05-19T00:00:00Z", "type": "discussion", "title": "Test Title", "raw_body": "Test body", "rendered_body": "<p>Test body</p>", "pinned": False, "closed": False, "following": False, "abuse_flagged": False, "voted": False, "vote_count": 0, "comment_count": 1, "unread_comment_count": 1, 
"comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_thread", "endorsed_comment_list_url": None, "non_endorsed_comment_list_url": None, "editable_fields": ["abuse_flagged", "following", "raw_body", "read", "title", "topic_id", "type", "voted"], "read": False, "has_endorsed": False, "response_count": 0, } response = self.client.post( self.url, json.dumps(request_data), content_type="application/json" ) self.assertEqual(response.status_code, 200) response_data = json.loads(response.content) self.assertEqual(response_data, expected_response_data) self.assertEqual( httpretty.last_request().parsed_body, { "course_id": [unicode(self.course.id)], "commentable_id": ["test_topic"], "thread_type": ["discussion"], "title": ["Test Title"], "body": ["Test body"], "user_id": [str(self.user.id)], } ) def test_error(self): request_data = { "topic_id": "dummy", "type": "discussion", "title": "dummy", "raw_body": "dummy", } response = self.client.post( self.url, json.dumps(request_data), content_type="application/json" ) expected_response_data = { "field_errors": {"course_id": {"developer_message": "This field is required."}} } self.assertEqual(response.status_code, 400) response_data = json.loads(response.content) self.assertEqual(response_data, expected_response_data) @ddt.ddt @httpretty.activate @disable_signal(api, 'thread_edited') @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True}) class ThreadViewSetPartialUpdateTest(DiscussionAPIViewTestMixin, ModuleStoreTestCase, PatchMediaTypeMixin): """Tests for ThreadViewSet partial_update""" def setUp(self): self.unsupported_media_type = JSONParser.media_type super(ThreadViewSetPartialUpdateTest, self).setUp() self.url = reverse("thread-detail", kwargs={"thread_id": "test_thread"}) def expected_response_data(self, overrides=None): """ create expected response data from comment update endpoint """ response_data = { "id": "test_thread", "course_id": unicode(self.course.id), 
"topic_id": "original_topic", "group_id": None, "group_name": None, "author": self.user.username, "author_label": None, "created_at": "1970-01-01T00:00:00Z", "updated_at": "1970-01-01T00:00:00Z", "type": "discussion", "title": "Original Title", "raw_body": "Original body", "rendered_body": "<p>Original body</p>", "pinned": False, "closed": False, "following": False, "abuse_flagged": False, "voted": False, "vote_count": 0, "comment_count": 0, "unread_comment_count": 0, "comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_thread", "endorsed_comment_list_url": None, "non_endorsed_comment_list_url": None, "editable_fields": [], "read": False, "has_endorsed": False, "response_count": 0, } response_data.update(overrides or {}) return response_data def test_basic(self): self.register_get_user_response(self.user) self.register_thread({"created_at": "Test Created Date", "updated_at": "Test Updated Date"}) request_data = {"raw_body": "Edited body"} response = self.request_patch(request_data) self.assertEqual(response.status_code, 200) response_data = json.loads(response.content) self.assertEqual( response_data, self.expected_response_data({ "raw_body": "Edited body", "rendered_body": "<p>Edited body</p>", "editable_fields": [ "abuse_flagged", "following", "raw_body", "read", "title", "topic_id", "type", "voted" ], "created_at": "Test Created Date", "updated_at": "Test Updated Date", "comment_count": 1, }) ) self.assertEqual( httpretty.last_request().parsed_body, { "course_id": [unicode(self.course.id)], "commentable_id": ["original_topic"], "thread_type": ["discussion"], "title": ["Original Title"], "body": ["Edited body"], "user_id": [str(self.user.id)], "anonymous": ["False"], "anonymous_to_peers": ["False"], "closed": ["False"], "pinned": ["False"], "read": ["False"], "requested_user_id": [str(self.user.id)], } ) def test_error(self): self.register_get_user_response(self.user) self.register_thread() request_data = {"title": ""} response = 
self.request_patch(request_data) expected_response_data = { "field_errors": {"title": {"developer_message": "This field may not be blank."}} } self.assertEqual(response.status_code, 400) response_data = json.loads(response.content) self.assertEqual(response_data, expected_response_data) @ddt.data( ("abuse_flagged", True), ("abuse_flagged", False), ) @ddt.unpack def test_closed_thread(self, field, value): self.register_get_user_response(self.user) self.register_thread({"closed": True}) self.register_flag_response("thread", "test_thread") request_data = {field: value} response = self.request_patch(request_data) self.assertEqual(response.status_code, 200) response_data = json.loads(response.content) self.assertEqual( response_data, self.expected_response_data({ "closed": True, "abuse_flagged": value, "editable_fields": ["abuse_flagged", "read"], "comment_count": 1, "unread_comment_count": 1, }) ) @ddt.data( ("raw_body", "Edited body"), ("voted", True), ("following", True), ) @ddt.unpack def test_closed_thread_error(self, field, value): self.register_get_user_response(self.user) self.register_thread({"closed": True}) self.register_flag_response("thread", "test_thread") request_data = {field: value} response = self.request_patch(request_data) self.assertEqual(response.status_code, 400) def test_patch_read_owner_user(self): self.register_get_user_response(self.user) self.register_thread() request_data = {"read": True} response = self.request_patch(request_data) self.assertEqual(response.status_code, 200) response_data = json.loads(response.content) self.assertEqual( response_data, self.expected_response_data({ "comment_count": 1, "read": True, "editable_fields": [ "abuse_flagged", "following", "raw_body", "read", "title", "topic_id", "type", "voted" ], }) ) self.assertEqual( httpretty.last_request().parsed_body, { "course_id": [unicode(self.course.id)], "commentable_id": ["original_topic"], "thread_type": ["discussion"], "title": ["Original Title"], "body": ["Original 
body"], "user_id": [str(self.user.id)], "anonymous": ["False"], "anonymous_to_peers": ["False"], "closed": ["False"], "pinned": ["False"], "read": ["True"], "requested_user_id": [str(self.user.id)], } ) def test_patch_read_non_owner_user(self): self.register_get_user_response(self.user) thread_owner_user = UserFactory.create(password=self.password) CourseEnrollmentFactory.create(user=thread_owner_user, course_id=self.course.id) self.register_get_user_response(thread_owner_user) self.register_thread({"username": thread_owner_user.username, "user_id": str(thread_owner_user.id)}) request_data = {"read": True} response = self.request_patch(request_data) self.assertEqual(response.status_code, 200) response_data = json.loads(response.content) self.assertEqual( response_data, self.expected_response_data({ "author": str(thread_owner_user.username), "comment_count": 1, "read": True, "editable_fields": [ "abuse_flagged", "following", "read", "voted" ], }) ) self.assertEqual( httpretty.last_request().parsed_body, { "course_id": [unicode(self.course.id)], "commentable_id": ["original_topic"], "thread_type": ["discussion"], "title": ["Original Title"], "body": ["Original body"], "user_id": [str(thread_owner_user.id)], "anonymous": ["False"], "anonymous_to_peers": ["False"], "closed": ["False"], "pinned": ["False"], "read": ["True"], "requested_user_id": [str(self.user.id)], } ) @httpretty.activate @disable_signal(api, 'thread_deleted') @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True}) class ThreadViewSetDeleteTest(DiscussionAPIViewTestMixin, ModuleStoreTestCase): """Tests for ThreadViewSet delete""" def setUp(self): super(ThreadViewSetDeleteTest, self).setUp() self.url = reverse("thread-detail", kwargs={"thread_id": "test_thread"}) self.thread_id = "test_thread" def test_basic(self): self.register_get_user_response(self.user) cs_thread = make_minimal_cs_thread({ "id": self.thread_id, "course_id": unicode(self.course.id), "username": 
self.user.username, "user_id": str(self.user.id), }) self.register_get_thread_response(cs_thread) self.register_delete_thread_response(self.thread_id) response = self.client.delete(self.url) self.assertEqual(response.status_code, 204) self.assertEqual(response.content, "") self.assertEqual( urlparse(httpretty.last_request().path).path, "/api/v1/threads/{}".format(self.thread_id) ) self.assertEqual(httpretty.last_request().method, "DELETE") def test_delete_nonexistent_thread(self): self.register_get_thread_error_response(self.thread_id, 404) response = self.client.delete(self.url) self.assertEqual(response.status_code, 404) @ddt.ddt @httpretty.activate @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True}) class CommentViewSetListTest(DiscussionAPIViewTestMixin, ModuleStoreTestCase): """Tests for CommentViewSet list""" def setUp(self): super(CommentViewSetListTest, self).setUp() self.author = UserFactory.create() self.url = reverse("comment-list") self.thread_id = "test_thread" def make_minimal_cs_thread(self, overrides=None): """ Create a thread with the given overrides, plus the course_id if not already in overrides. 
""" overrides = overrides.copy() if overrides else {} overrides.setdefault("course_id", unicode(self.course.id)) return make_minimal_cs_thread(overrides) def test_thread_id_missing(self): response = self.client.get(self.url) self.assert_response_correct( response, 400, {"field_errors": {"thread_id": {"developer_message": "This field is required."}}} ) def test_404(self): self.register_get_thread_error_response(self.thread_id, 404) response = self.client.get(self.url, {"thread_id": self.thread_id}) self.assert_response_correct( response, 404, {"developer_message": "Not found."} ) def test_basic(self): self.register_get_user_response(self.user, upvoted_ids=["test_comment"]) source_comments = [{ "type": "comment", "id": "test_comment", "thread_id": self.thread_id, "parent_id": None, "user_id": str(self.author.id), "username": self.author.username, "anonymous": False, "anonymous_to_peers": False, "created_at": "2015-05-11T00:00:00Z", "updated_at": "2015-05-11T11:11:11Z", "body": "Test body", "endorsed": False, "abuse_flaggers": [], "votes": {"up_count": 4}, }] expected_comments = [{ "id": "test_comment", "thread_id": self.thread_id, "parent_id": None, "author": self.author.username, "author_label": None, "created_at": "2015-05-11T00:00:00Z", "updated_at": "2015-05-11T11:11:11Z", "raw_body": "Test body", "rendered_body": "<p>Test body</p>", "endorsed": False, "endorsed_by": None, "endorsed_by_label": None, "endorsed_at": None, "abuse_flagged": False, "voted": True, "vote_count": 4, "editable_fields": ["abuse_flagged", "voted"], "children": [], }] self.register_get_thread_response({ "id": self.thread_id, "course_id": unicode(self.course.id), "thread_type": "discussion", "children": source_comments, "resp_total": 100, }) response = self.client.get(self.url, {"thread_id": self.thread_id}) self.assert_response_correct( response, 200, { "results": expected_comments, "next": "http://testserver/api/discussion/v1/comments/?page=2&thread_id={}".format( self.thread_id ), 
"previous": None, } ) self.assert_query_params_equal( httpretty.httpretty.latest_requests[-2], { "recursive": ["False"], "resp_skip": ["0"], "resp_limit": ["10"], "user_id": [str(self.user.id)], "mark_as_read": ["False"], } ) def test_pagination(self): """ Test that pagination parameters are correctly plumbed through to the comments service and that a 404 is correctly returned if a page past the end is requested """ self.register_get_user_response(self.user) self.register_get_thread_response(make_minimal_cs_thread({ "id": self.thread_id, "course_id": unicode(self.course.id), "thread_type": "discussion", "children": [], "resp_total": 10, })) response = self.client.get( self.url, {"thread_id": self.thread_id, "page": "18", "page_size": "4"} ) self.assert_response_correct( response, 404, {"developer_message": "Not found."} ) self.assert_query_params_equal( httpretty.httpretty.latest_requests[-2], { "recursive": ["False"], "resp_skip": ["68"], "resp_limit": ["4"], "user_id": [str(self.user.id)], "mark_as_read": ["False"], } ) @ddt.data( (True, "endorsed_comment"), ("true", "endorsed_comment"), ("1", "endorsed_comment"), (False, "non_endorsed_comment"), ("false", "non_endorsed_comment"), ("0", "non_endorsed_comment"), ) @ddt.unpack def test_question_content(self, endorsed, comment_id): self.register_get_user_response(self.user) thread = self.make_minimal_cs_thread({ "thread_type": "question", "endorsed_responses": [make_minimal_cs_comment({"id": "endorsed_comment"})], "non_endorsed_responses": [make_minimal_cs_comment({"id": "non_endorsed_comment"})], "non_endorsed_resp_total": 1, }) self.register_get_thread_response(thread) response = self.client.get(self.url, { "thread_id": thread["id"], "endorsed": endorsed, }) parsed_content = json.loads(response.content) self.assertEqual(parsed_content["results"][0]["id"], comment_id) @httpretty.activate @disable_signal(api, 'comment_deleted') @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True}) 
class CommentViewSetDeleteTest(DiscussionAPIViewTestMixin, ModuleStoreTestCase): """Tests for ThreadViewSet delete""" def setUp(self): super(CommentViewSetDeleteTest, self).setUp() self.url = reverse("comment-detail", kwargs={"comment_id": "test_comment"}) self.comment_id = "test_comment" def test_basic(self): self.register_get_user_response(self.user) cs_thread = make_minimal_cs_thread({ "id": "test_thread", "course_id": unicode(self.course.id), }) self.register_get_thread_response(cs_thread) cs_comment = make_minimal_cs_comment({ "id": self.comment_id, "course_id": cs_thread["course_id"], "thread_id": cs_thread["id"], "username": self.user.username, "user_id": str(self.user.id), }) self.register_get_comment_response(cs_comment) self.register_delete_comment_response(self.comment_id) response = self.client.delete(self.url) self.assertEqual(response.status_code, 204) self.assertEqual(response.content, "") self.assertEqual( urlparse(httpretty.last_request().path).path, "/api/v1/comments/{}".format(self.comment_id) ) self.assertEqual(httpretty.last_request().method, "DELETE") def test_delete_nonexistent_comment(self): self.register_get_comment_error_response(self.comment_id, 404) response = self.client.delete(self.url) self.assertEqual(response.status_code, 404) @httpretty.activate @disable_signal(api, 'comment_created') @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True}) class CommentViewSetCreateTest(DiscussionAPIViewTestMixin, ModuleStoreTestCase): """Tests for CommentViewSet create""" def setUp(self): super(CommentViewSetCreateTest, self).setUp() self.url = reverse("comment-list") def test_basic(self): self.register_get_user_response(self.user) self.register_thread() self.register_comment() request_data = { "thread_id": "test_thread", "raw_body": "Test body", } expected_response_data = { "id": "test_comment", "thread_id": "test_thread", "parent_id": None, "author": self.user.username, "author_label": None, "created_at": 
"1970-01-01T00:00:00Z", "updated_at": "1970-01-01T00:00:00Z", "raw_body": "Test body", "rendered_body": "<p>Test body</p>", "endorsed": False, "endorsed_by": None, "endorsed_by_label": None, "endorsed_at": None, "abuse_flagged": False, "voted": False, "vote_count": 0, "children": [], "editable_fields": ["abuse_flagged", "raw_body", "voted"], } response = self.client.post( self.url, json.dumps(request_data), content_type="application/json" ) self.assertEqual(response.status_code, 200) response_data = json.loads(response.content) self.assertEqual(response_data, expected_response_data) self.assertEqual( urlparse(httpretty.last_request().path).path, "/api/v1/threads/test_thread/comments" ) self.assertEqual( httpretty.last_request().parsed_body, { "course_id": [unicode(self.course.id)], "body": ["Test body"], "user_id": [str(self.user.id)], } ) def test_error(self): response = self.client.post( self.url, json.dumps({}), content_type="application/json" ) expected_response_data = { "field_errors": {"thread_id": {"developer_message": "This field is required."}} } self.assertEqual(response.status_code, 400) response_data = json.loads(response.content) self.assertEqual(response_data, expected_response_data) def test_closed_thread(self): self.register_get_user_response(self.user) self.register_thread({"closed": True}) self.register_comment() request_data = { "thread_id": "test_thread", "raw_body": "Test body" } response = self.client.post( self.url, json.dumps(request_data), content_type="application/json" ) self.assertEqual(response.status_code, 403) @ddt.ddt @disable_signal(api, 'comment_edited') @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True}) class CommentViewSetPartialUpdateTest(DiscussionAPIViewTestMixin, ModuleStoreTestCase, PatchMediaTypeMixin): """Tests for CommentViewSet partial_update""" def setUp(self): self.unsupported_media_type = JSONParser.media_type super(CommentViewSetPartialUpdateTest, self).setUp() httpretty.reset() 
httpretty.enable() self.addCleanup(httpretty.disable) self.register_get_user_response(self.user) self.url = reverse("comment-detail", kwargs={"comment_id": "test_comment"}) def expected_response_data(self, overrides=None): """ create expected response data from comment update endpoint """ response_data = { "id": "test_comment", "thread_id": "test_thread", "parent_id": None, "author": self.user.username, "author_label": None, "created_at": "1970-01-01T00:00:00Z", "updated_at": "1970-01-01T00:00:00Z", "raw_body": "Original body", "rendered_body": "<p>Original body</p>", "endorsed": False, "endorsed_by": None, "endorsed_by_label": None, "endorsed_at": None, "abuse_flagged": False, "voted": False, "vote_count": 0, "children": [], "editable_fields": [], } response_data.update(overrides or {}) return response_data def test_basic(self): self.register_thread() self.register_comment({"created_at": "Test Created Date", "updated_at": "Test Updated Date"}) request_data = {"raw_body": "Edited body"} response = self.request_patch(request_data) self.assertEqual(response.status_code, 200) response_data = json.loads(response.content) self.assertEqual( response_data, self.expected_response_data({ "raw_body": "Edited body", "rendered_body": "<p>Edited body</p>", "editable_fields": ["abuse_flagged", "raw_body", "voted"], "created_at": "Test Created Date", "updated_at": "Test Updated Date", }) ) self.assertEqual( httpretty.last_request().parsed_body, { "body": ["Edited body"], "course_id": [unicode(self.course.id)], "user_id": [str(self.user.id)], "anonymous": ["False"], "anonymous_to_peers": ["False"], "endorsed": ["False"], } ) def test_error(self): self.register_thread() self.register_comment() request_data = {"raw_body": ""} response = self.request_patch(request_data) expected_response_data = { "field_errors": {"raw_body": {"developer_message": "This field may not be blank."}} } self.assertEqual(response.status_code, 400) response_data = json.loads(response.content) 
self.assertEqual(response_data, expected_response_data) @ddt.data( ("abuse_flagged", True), ("abuse_flagged", False), ) @ddt.unpack def test_closed_thread(self, field, value): self.register_thread({"closed": True}) self.register_comment() self.register_flag_response("comment", "test_comment") request_data = {field: value} response = self.request_patch(request_data) self.assertEqual(response.status_code, 200) response_data = json.loads(response.content) self.assertEqual( response_data, self.expected_response_data({ "abuse_flagged": value, "editable_fields": ["abuse_flagged"], }) ) @ddt.data( ("raw_body", "Edited body"), ("voted", True), ("following", True), ) @ddt.unpack def test_closed_thread_error(self, field, value): self.register_thread({"closed": True}) self.register_comment() request_data = {field: value} response = self.request_patch(request_data) self.assertEqual(response.status_code, 400) @httpretty.activate @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True}) class ThreadViewSetRetrieveTest(DiscussionAPIViewTestMixin, ModuleStoreTestCase): """Tests for ThreadViewSet Retrieve""" def setUp(self): super(ThreadViewSetRetrieveTest, self).setUp() self.url = reverse("thread-detail", kwargs={"thread_id": "test_thread"}) self.thread_id = "test_thread" def test_basic(self): self.register_get_user_response(self.user) cs_thread = make_minimal_cs_thread({ "id": self.thread_id, "course_id": unicode(self.course.id), "commentable_id": "test_topic", "username": self.user.username, "user_id": str(self.user.id), "title": "Test Title", "body": "Test body", "created_at": "2015-05-29T00:00:00Z", "updated_at": "2015-05-29T00:00:00Z" }) expected_response_data = { "author": self.user.username, "author_label": None, "created_at": "2015-05-29T00:00:00Z", "updated_at": "2015-05-29T00:00:00Z", "raw_body": "Test body", "rendered_body": "<p>Test body</p>", "abuse_flagged": False, "voted": False, "vote_count": 0, "editable_fields": ["abuse_flagged", 
"following", "raw_body", "read", "title", "topic_id", "type", "voted"], "course_id": unicode(self.course.id), "topic_id": "test_topic", "group_id": None, "group_name": None, "title": "Test Title", "pinned": False, "closed": False, "following": False, "comment_count": 1, "unread_comment_count": 1, "comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_thread", "endorsed_comment_list_url": None, "non_endorsed_comment_list_url": None, "read": False, "has_endorsed": False, "id": "test_thread", "type": "discussion", "response_count": 0, } self.register_get_thread_response(cs_thread) response = self.client.get(self.url) self.assertEqual(response.status_code, 200) self.assertEqual(json.loads(response.content), expected_response_data) self.assertEqual(httpretty.last_request().method, "GET") def test_retrieve_nonexistent_thread(self): self.register_get_thread_error_response(self.thread_id, 404) response = self.client.get(self.url) self.assertEqual(response.status_code, 404) @httpretty.activate @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True}) class CommentViewSetRetrieveTest(DiscussionAPIViewTestMixin, ModuleStoreTestCase): """Tests for CommentViewSet Retrieve""" def setUp(self): super(CommentViewSetRetrieveTest, self).setUp() self.url = reverse("comment-detail", kwargs={"comment_id": "test_comment"}) self.thread_id = "test_thread" self.comment_id = "test_comment" def make_comment_data(self, comment_id, parent_id=None, children=[]): # pylint: disable=W0102 """ Returns comment dict object as returned by comments service """ return make_minimal_cs_comment({ "id": comment_id, "parent_id": parent_id, "course_id": unicode(self.course.id), "thread_id": self.thread_id, "thread_type": "discussion", "username": self.user.username, "user_id": str(self.user.id), "created_at": "2015-06-03T00:00:00Z", "updated_at": "2015-06-03T00:00:00Z", "body": "Original body", "children": children, }) def test_basic(self): 
self.register_get_user_response(self.user) cs_comment_child = self.make_comment_data("test_child_comment", self.comment_id, children=[]) cs_comment = self.make_comment_data(self.comment_id, None, [cs_comment_child]) cs_thread = make_minimal_cs_thread({ "id": self.thread_id, "course_id": unicode(self.course.id), "children": [cs_comment], }) self.register_get_thread_response(cs_thread) self.register_get_comment_response(cs_comment) expected_response_data = { "id": "test_child_comment", "parent_id": self.comment_id, "thread_id": self.thread_id, "author": self.user.username, "author_label": None, "raw_body": "Original body", "rendered_body": "<p>Original body</p>", "created_at": "2015-06-03T00:00:00Z", "updated_at": "2015-06-03T00:00:00Z", "children": [], "endorsed_at": None, "endorsed": False, "endorsed_by": None, "endorsed_by_label": None, "voted": False, "vote_count": 0, "abuse_flagged": False, "editable_fields": ["abuse_flagged", "raw_body", "voted"] } response = self.client.get(self.url) self.assertEqual(response.status_code, 200) self.assertEqual(json.loads(response.content)['results'][0], expected_response_data) def test_retrieve_nonexistent_comment(self): self.register_get_comment_error_response(self.comment_id, 404) response = self.client.get(self.url) self.assertEqual(response.status_code, 404)
agpl-3.0
hermestrimegiste/patchtgtel
patchConnectionTogotelecom.py
1
2031
# -*- coding: utf-8 -*-
"""Watchdog script that keeps a TOGOTELECOM NetworkManager connection alive.

Polls for Internet connectivity once a minute and, when the link is down,
bounces the modem connection through ``nmcli`` (soft disconnect first, then
a hard networking restart as a fallback).
"""
__author__ = 'hermes'

import socket
from os import system
from time import sleep
from datetime import datetime

# Name of the NetworkManager connection to bring up (set to your network's name).
# (The original declared this with a module-level `global`, which is a no-op.)
connectionName = 'TOGOTELECOM'


def is_connected():
    """Return True when the Internet appears reachable.

    First tries a DNS lookup; if that fails (e.g. DNS is down while the
    link is otherwise up), falls back to a raw TCP probe of a known
    Google IP with a 15 s timeout.
    See http://stackoverflow.com/questions/20913411
    """
    try:
        socket.gethostbyname("www.google.com")
        return True
    except socket.error:
        try:
            # TCP probe with no DNS involved. Close the probe socket
            # explicitly: the original leaked one file descriptor per
            # failed-DNS retry.
            sock = socket.create_connection(('173.194.67.94', 80), 15)
            sock.close()
            return True
        except socket.error:
            return False


def hardRestartNetwork():
    """Disable and re-enable networking, then bring the connection back up."""
    system('nmcli nm enable false')
    system('nmcli nm enable true')
    sleep(5)
    system("nmcli con up id '%s'" % connectionName)


def patchTogotelecom():
    """Try a soft reconnect of the modem; fall back to a hard restart."""
    system('nmcli nm enable true')
    deconnectionSoft = system('nmcli dev disconnect iface ttyUSB0')
    sleep(5)
    # os.system returns the raw wait status: 0 = success,
    # 1536 = exit code 6 << 8 (device already disconnected).
    if deconnectionSoft == 0 or deconnectionSoft == 1536:
        activeTGTEL = system("nmcli con up id '%s'" % connectionName)
        if activeTGTEL == 768:
            # Exit code 3 << 8: "the 90 s activation timeout expired".
            hardRestartNetwork()
    else:
        # Soft disconnect failed: restart networking entirely.
        hardRestartNetwork()

    if is_connected():
        print(u'Connecté le %s ' % str(datetime.now().strftime('%d-%m-%Y -> %H:%M:%S')))
    else:
        print(u'Tentative echoué le %s ' % str(datetime.now().strftime('%d-%m-%Y -> %H:%M:%S')))


# Script entry point: restart the network once, then poll forever.
hardRestartNetwork()
print(u'debut du script > %s ' % str(datetime.now().strftime('%d-%m-%Y -> %H:%M:%S')))
sleep(5)

while True:
    if is_connected():
        sleep(60)
    else:
        print(u'Tentative de reconnexion le %s ' % str(datetime.now().strftime('%d-%m-%Y -> %H:%M:%S')))
        patchTogotelecom()
gpl-2.0
ATIX-AG/ansible
lib/ansible/modules/windows/win_whoami.py
52
5530
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = r'''
---
module: win_whoami
version_added: "2.5"
short_description: Get information about the current user and process
description:
- Designed to return the same information as the C(whoami /all) command.
- Also includes information missing from C(whoami) such as logon metadata like
  logon rights, id, type.
notes:
- If running this module with a non admin user, the logon rights will be an
  empty list as Administrator rights are required to query LSA for the
  information.
author:
- Jordan Borean (@jborean93)
'''

EXAMPLES = r'''
- name: get whoami information
  win_whoami:
'''

RETURN = r'''
authentication_package:
  description: The name of the authentication package used to authenticate the
    user in the session.
  returned: success
  type: str
  sample: Negotiate
user_flags:
  description: The user flags for the logon session, see UserFlags in
    U(https://msdn.microsoft.com/en-us/library/windows/desktop/aa380128).
  returned: success
  type: str
  sample: Winlogon
upn:
  description: The user principal name of the current user.
  returned: success
  type: str
  sample: Administrator@DOMAIN.COM
logon_type:
  description: The logon type that identifies the logon method, see
    U(https://msdn.microsoft.com/en-us/library/windows/desktop/aa380129.aspx).
  returned: success
  type: str
  sample: Network
privileges:
  description: A dictionary of privileges and their state on the logon token.
  returned: success
  type: dict
  sample: {
    "SeChangeNotifyPrivileges": "enabled-by-default",
    "SeRemoteShutdownPrivilege": "disabled",
    "SeDebugPrivilege": "enabled"
  }
label:
  description: The mandatory label set to the logon session.
  returned: success
  type: complex
  contains:
    domain_name:
      description: The domain name of the label SID.
      returned: success
      type: str
      sample: Mandatory Label
    sid:
      description: The SID in string form.
      returned: success
      type: str
      sample: S-1-16-12288
    account_name:
      description: The account name of the label SID.
      returned: success
      type: str
      sample: High Mandatory Level
    type:
      description: The type of SID.
      returned: success
      type: str
      sample: Label
impersonation_level:
  description: The impersonation level of the token, only valid if
    C(token_type) is C(TokenImpersonation), see
    U(https://msdn.microsoft.com/en-us/library/windows/desktop/aa379572.aspx).
  returned: success
  type: str
  sample: SecurityAnonymous
login_time:
  description: The logon time in ISO 8601 format
  returned: success
  type: str
  sample: '2017-11-27T06:24:14.3321665+10:00'
groups:
  description: A list of groups and attributes that the user is a member of.
  returned: success
  type: list
  sample: [
    {
      "account_name": "Domain Users",
      "domain_name": "DOMAIN",
      "attributes": [
        "Mandatory",
        "Enabled by default",
        "Enabled"
      ],
      "sid": "S-1-5-21-1654078763-769949647-2968445802-513",
      "type": "Group"
    },
    {
      "account_name": "Administrators",
      "domain_name": "BUILTIN",
      "attributes": [
        "Mandatory",
        "Enabled by default",
        "Enabled",
        "Owner"
      ],
      "sid": "S-1-5-32-544",
      "type": "Alias"
    }
  ]
account:
  description: The running account SID details.
  returned: success
  type: complex
  contains:
    domain_name:
      description: The domain name of the account SID.
      returned: success
      type: str
      sample: DOMAIN
    sid:
      description: The SID in string form.
      returned: success
      type: str
      sample: S-1-5-21-1654078763-769949647-2968445802-500
    account_name:
      description: The account name of the account SID.
      returned: success
      type: str
      sample: Administrator
    type:
      description: The type of SID.
      returned: success
      type: str
      sample: User
login_domain:
  description: The name of the domain used to authenticate the owner of the
    session.
  returned: success
  type: str
  sample: DOMAIN
rights:
  description: A list of logon rights assigned to the logon.
  returned: success and running user is a member of the local Administrators
    group
  type: list
  sample: [
    "SeNetworkLogonRight",
    "SeInteractiveLogonRight",
    "SeBatchLogonRight",
    "SeRemoteInteractiveLogonRight"
  ]
logon_server:
  description: The name of the server used to authenticate the owner of the
    logon session.
  returned: success
  type: str
  sample: DC01
logon_id:
  description: The unique identifier of the logon session.
  returned: success
  type: int
  sample: 20470143
dns_domain_name:
  description: The DNS name of the logon session, this is an empty string if
    this is not set.
  returned: success
  type: str
  sample: DOMAIN.COM
token_type:
  description: The token type to indicate whether it is a primary or
    impersonation token.
  returned: success
  type: str
  sample: TokenPrimary
'''
gpl-3.0
InAnimaTe/CouchPotatoServer
libs/bs4/builder/_htmlparser.py
412
8839
"""Use the HTMLParser library to parse HTML files that aren't too bad.""" __all__ = [ 'HTMLParserTreeBuilder', ] from HTMLParser import ( HTMLParser, HTMLParseError, ) import sys import warnings # Starting in Python 3.2, the HTMLParser constructor takes a 'strict' # argument, which we'd like to set to False. Unfortunately, # http://bugs.python.org/issue13273 makes strict=True a better bet # before Python 3.2.3. # # At the end of this file, we monkeypatch HTMLParser so that # strict=True works well on Python 3.2.2. major, minor, release = sys.version_info[:3] CONSTRUCTOR_TAKES_STRICT = ( major > 3 or (major == 3 and minor > 2) or (major == 3 and minor == 2 and release >= 3)) from bs4.element import ( CData, Comment, Declaration, Doctype, ProcessingInstruction, ) from bs4.dammit import EntitySubstitution, UnicodeDammit from bs4.builder import ( HTML, HTMLTreeBuilder, STRICT, ) HTMLPARSER = 'html.parser' class BeautifulSoupHTMLParser(HTMLParser): def handle_starttag(self, name, attrs): # XXX namespace attr_dict = {} for key, value in attrs: # Change None attribute values to the empty string # for consistency with the other tree builders. if value is None: value = '' attr_dict[key] = value attrvalue = '""' self.soup.handle_starttag(name, None, None, attr_dict) def handle_endtag(self, name): self.soup.handle_endtag(name) def handle_data(self, data): self.soup.handle_data(data) def handle_charref(self, name): # XXX workaround for a bug in HTMLParser. Remove this once # it's fixed. 
if name.startswith('x'): real_name = int(name.lstrip('x'), 16) elif name.startswith('X'): real_name = int(name.lstrip('X'), 16) else: real_name = int(name) try: data = unichr(real_name) except (ValueError, OverflowError), e: data = u"\N{REPLACEMENT CHARACTER}" self.handle_data(data) def handle_entityref(self, name): character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name) if character is not None: data = character else: data = "&%s;" % name self.handle_data(data) def handle_comment(self, data): self.soup.endData() self.soup.handle_data(data) self.soup.endData(Comment) def handle_decl(self, data): self.soup.endData() if data.startswith("DOCTYPE "): data = data[len("DOCTYPE "):] elif data == 'DOCTYPE': # i.e. "<!DOCTYPE>" data = '' self.soup.handle_data(data) self.soup.endData(Doctype) def unknown_decl(self, data): if data.upper().startswith('CDATA['): cls = CData data = data[len('CDATA['):] else: cls = Declaration self.soup.endData() self.soup.handle_data(data) self.soup.endData(cls) def handle_pi(self, data): self.soup.endData() if data.endswith("?") and data.lower().startswith("xml"): # "An XHTML processing instruction using the trailing '?' # will cause the '?' to be included in data." - HTMLParser # docs. # # Strip the question mark so we don't end up with two # question marks. data = data[:-1] self.soup.handle_data(data) self.soup.endData(ProcessingInstruction) class HTMLParserTreeBuilder(HTMLTreeBuilder): is_xml = False features = [HTML, STRICT, HTMLPARSER] def __init__(self, *args, **kwargs): if CONSTRUCTOR_TAKES_STRICT: kwargs['strict'] = False self.parser_args = (args, kwargs) def prepare_markup(self, markup, user_specified_encoding=None, document_declared_encoding=None): """ :return: A 4-tuple (markup, original encoding, encoding declared within markup, whether any characters had to be replaced with REPLACEMENT CHARACTER). 
""" if isinstance(markup, unicode): yield (markup, None, None, False) return try_encodings = [user_specified_encoding, document_declared_encoding] dammit = UnicodeDammit(markup, try_encodings, is_html=True) yield (dammit.markup, dammit.original_encoding, dammit.declared_html_encoding, dammit.contains_replacement_characters) def feed(self, markup): args, kwargs = self.parser_args parser = BeautifulSoupHTMLParser(*args, **kwargs) parser.soup = self.soup try: parser.feed(markup) except HTMLParseError, e: warnings.warn(RuntimeWarning( "Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help.")) raise e # Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some # 3.2.3 code. This ensures they don't treat markup like <p></p> as a # string. # # XXX This code can be removed once most Python 3 users are on 3.2.3. if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT: import re attrfind_tolerant = re.compile( r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*' r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?') HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant locatestarttagend = re.compile(r""" <[a-zA-Z][-.a-zA-Z0-9:_]* # tag name (?:\s+ # whitespace before attribute name (?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name (?:\s*=\s* # value indicator (?:'[^']*' # LITA-enclosed value |\"[^\"]*\" # LIT-enclosed value |[^'\">\s]+ # bare value ) )? 
) )* \s* # trailing whitespace """, re.VERBOSE) BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend from html.parser import tagfind, attrfind def parse_starttag(self, i): self.__starttag_text = None endpos = self.check_for_whole_start_tag(i) if endpos < 0: return endpos rawdata = self.rawdata self.__starttag_text = rawdata[i:endpos] # Now parse the data between i+1 and j into a tag and attrs attrs = [] match = tagfind.match(rawdata, i+1) assert match, 'unexpected call to parse_starttag()' k = match.end() self.lasttag = tag = rawdata[i+1:k].lower() while k < endpos: if self.strict: m = attrfind.match(rawdata, k) else: m = attrfind_tolerant.match(rawdata, k) if not m: break attrname, rest, attrvalue = m.group(1, 2, 3) if not rest: attrvalue = None elif attrvalue[:1] == '\'' == attrvalue[-1:] or \ attrvalue[:1] == '"' == attrvalue[-1:]: attrvalue = attrvalue[1:-1] if attrvalue: attrvalue = self.unescape(attrvalue) attrs.append((attrname.lower(), attrvalue)) k = m.end() end = rawdata[k:endpos].strip() if end not in (">", "/>"): lineno, offset = self.getpos() if "\n" in self.__starttag_text: lineno = lineno + self.__starttag_text.count("\n") offset = len(self.__starttag_text) \ - self.__starttag_text.rfind("\n") else: offset = offset + len(self.__starttag_text) if self.strict: self.error("junk characters in start tag: %r" % (rawdata[k:endpos][:20],)) self.handle_data(rawdata[i:endpos]) return endpos if end.endswith('/>'): # XHTML-style empty tag: <span attr="value" /> self.handle_startendtag(tag, attrs) else: self.handle_starttag(tag, attrs) if tag in self.CDATA_CONTENT_ELEMENTS: self.set_cdata_mode(tag) return endpos def set_cdata_mode(self, elem): self.cdata_elem = elem.lower() self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I) BeautifulSoupHTMLParser.parse_starttag = parse_starttag BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode CONSTRUCTOR_TAKES_STRICT = True
gpl-3.0
nigelb/SerialGrabber
serial_grabber/cli.py
1
2827
#!/usr/bin/env python
# SerialGrabber reads data from a serial port and processes it with the
# configured processor.
# Copyright (C) 2012 NigelB
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import signal
import time

from SerialGrabber_Storage import storage_cache
from serial_grabber.commander import MultiProcessParameterFactory
from serial_grabber.util import config_helper
from serial_grabber.watchdog import running, counter, Watchdog
from serial_grabber.processor import ProcessorManager


class status:
    """Minimal status sink that logs "tooltip" text at INFO level.

    Presumably stands in for a GUI status display; it is handed to
    ``counter`` below, which is assumed to report progress via
    ``set_tooltip`` -- TODO confirm against serial_grabber.watchdog.
    """

    def __init__(self, logger):
        self.logger = logger

    def set_tooltip(self, tooltip):
        # Status text is simply forwarded to the logger.
        self.logger.info(tooltip)


def register_handler(running, watchdog, reader, processor, command):
    """Install a SIGINT handler that shuts the grabber down cleanly.

    On Ctrl+C: clear the shared running flag, stop the commander (if any),
    wait for the watchdog-managed threads to finish, close the reader,
    then exit the process.
    NOTE(review): ``processor`` is accepted but never used by the handler.
    """
    def signal_handler(signal, frame):
        # NOTE(review): this parameter name shadows the ``signal`` module
        # inside the handler body (harmless here, but confusing).
        print 'You pressed Ctrl+C!'
        # Flip the shared flag first so worker threads can wind down...
        running.running = False
        if command:
            command.stop()
        # ...then block until the watchdog's threads have terminated.
        watchdog.join()
        if reader:
            reader.close()
        exit(0)
    signal.signal(signal.SIGINT, signal_handler)


def start(logger, reader, processor, command):
    """Wire up the reader/processor/commander threads and block until stopped.

    Components implementing MultiProcessParameterFactory get a chance to
    publish extra shared parameters before any thread starts. The main
    thread then idles until the shared running flag is cleared (e.g. by
    the SIGINT handler installed above). The storage cache is always
    closed on the way out, even on exceptions.
    """
    try:
        si = status(logger)
        isRunning = running(True)  # shared run flag (from serial_grabber.watchdog)
        c = counter(si)            # progress counter reporting through `si`
        params = config_helper({
            "counter": c,
            "running": isRunning
        })
        # Let multi-process-aware components add their own shared parameters.
        if issubclass(command.__class__, MultiProcessParameterFactory):
            command.populate_parameters(params)
        if issubclass(reader.__class__, MultiProcessParameterFactory):
            reader.populate_parameters(params)
        if issubclass(processor.__class__, MultiProcessParameterFactory):
            processor.populate_parameters(params)
        watchdog = Watchdog(isRunning)
        # Install the Ctrl+C handler before starting any threads.
        register_handler(isRunning, watchdog, reader, processor, command)
        if reader:
            watchdog.start_thread(reader, (isRunning, c, params), "Runner")
        if processor:
            watchdog.start_thread(ProcessorManager(processor), (isRunning, c, params), "Processor")
        # The commander only makes sense when there is a reader to command.
        if command and reader:
            watchdog.start_thread(command, (isRunning, c, params), "Commander")
        while isRunning.running:
            time.sleep(1)
    finally:
        # Always flush/close the on-disk cache, even on error or Ctrl+C.
        storage_cache.close_cache()
gpl-2.0
Tianhao-Gu/kb_uploadmethods
lib/installed_clients/ConditionUtilsClient.py
2
5789
# -*- coding: utf-8 -*- ############################################################ # # Autogenerated by the KBase type compiler - # any changes made here will be overwritten # ############################################################ from __future__ import print_function # the following is a hack to get the baseclient to import whether we're in a # package or not. This makes pep8 unhappy hence the annotations. try: # baseclient and this client are in a package from .baseclient import BaseClient as _BaseClient # @UnusedImport except ImportError: # no they aren't from baseclient import BaseClient as _BaseClient # @Reimport class ConditionUtils(object): def __init__( self, url=None, timeout=30 * 60, user_id=None, password=None, token=None, ignore_authrc=False, trust_all_ssl_certificates=False, auth_svc='https://ci.kbase.us/services/auth/api/legacy/KBase/Sessions/Login', service_ver='release', async_job_check_time_ms=100, async_job_check_time_scale_percent=150, async_job_check_max_time_ms=300000): if url is None: raise ValueError('A url is required') self._service_ver = service_ver self._client = _BaseClient( url, timeout=timeout, user_id=user_id, password=password, token=token, ignore_authrc=ignore_authrc, trust_all_ssl_certificates=trust_all_ssl_certificates, auth_svc=auth_svc, async_job_check_time_ms=async_job_check_time_ms, async_job_check_time_scale_percent=async_job_check_time_scale_percent, async_job_check_max_time_ms=async_job_check_max_time_ms) def get_conditions(self, params, context=None): """ :param params: instance of type "GetConditionParams" (Get condition information in a friendly format ws_condition_set_id condition_set_ref list<string> conditions - Optional: Which conditions should be returned. defaults to all conditions in the set Returns {condition_label: {ontology_type(e.g. 
GO): [Factors]}}) -> structure: parameter "condition_set_ref" of type "ws_condition_set_id" (@id ws KBaseExperiments.ConditionSet), parameter "conditions" of list of String :returns: instance of type "GetConditionOutput" -> structure: parameter "conditions" of mapping from String to mapping from String to list of type "Factor" (Internally this is used to store factor information (without the value term) and also a format for returning data in a useful form from get_conditions @optional unit unit_ont_id value) -> structure: parameter "factor" of String, parameter "factor_ont_ref" of String, parameter "factor_ont_id" of String, parameter "unit" of String, parameter "unit_ont_id" of String, parameter "value" of String """ return self._client.run_job('ConditionUtils.get_conditions', [params], self._service_ver, context) def file_to_condition_set(self, params, context=None): """ :param params: instance of type "FileToConditionSetParams" (input_shock_id and input_file_path - alternative input params,) -> structure: parameter "input_shock_id" of String, parameter "input_file_path" of String, parameter "output_ws_id" of String, parameter "output_obj_name" of String :returns: instance of type "FileToConditionSetOutput" -> structure: parameter "condition_set_ref" of type "ws_condition_set_id" (@id ws KBaseExperiments.ConditionSet) """ return self._client.run_job('ConditionUtils.file_to_condition_set', [params], self._service_ver, context) def condition_set_to_tsv_file(self, params, context=None): """ :param params: instance of type "ConditionSetToTsvFileParams" -> structure: parameter "input_ref" of type "ws_condition_set_id" (@id ws KBaseExperiments.ConditionSet), parameter "destination_dir" of String :returns: instance of type "ConditionSetToTsvFileOutput" -> structure: parameter "file_path" of String """ return self._client.run_job('ConditionUtils.condition_set_to_tsv_file', [params], self._service_ver, context) def export_condition_set_tsv(self, params, context=None): 
""" :param params: instance of type "ExportConditionSetParams" -> structure: parameter "input_ref" of type "ws_condition_set_id" (@id ws KBaseExperiments.ConditionSet) :returns: instance of type "ExportConditionSetOutput" -> structure: parameter "shock_id" of String """ return self._client.run_job('ConditionUtils.export_condition_set_tsv', [params], self._service_ver, context) def export_condition_set_excel(self, params, context=None): """ :param params: instance of type "ExportConditionSetParams" -> structure: parameter "input_ref" of type "ws_condition_set_id" (@id ws KBaseExperiments.ConditionSet) :returns: instance of type "ExportConditionSetOutput" -> structure: parameter "shock_id" of String """ return self._client.run_job('ConditionUtils.export_condition_set_excel', [params], self._service_ver, context) def status(self, context=None): return self._client.run_job('ConditionUtils.status', [], self._service_ver, context)
mit
mdhaber/scipy
scipy/optimize/_lsq/least_squares.py
12
39190
"""Generic interface for least-squares minimization.""" from warnings import warn import numpy as np from numpy.linalg import norm from scipy.sparse import issparse, csr_matrix from scipy.sparse.linalg import LinearOperator from scipy.optimize import _minpack, OptimizeResult from scipy.optimize._numdiff import approx_derivative, group_columns from .trf import trf from .dogbox import dogbox from .common import EPS, in_bounds, make_strictly_feasible TERMINATION_MESSAGES = { -1: "Improper input parameters status returned from `leastsq`", 0: "The maximum number of function evaluations is exceeded.", 1: "`gtol` termination condition is satisfied.", 2: "`ftol` termination condition is satisfied.", 3: "`xtol` termination condition is satisfied.", 4: "Both `ftol` and `xtol` termination conditions are satisfied." } FROM_MINPACK_TO_COMMON = { 0: -1, # Improper input parameters from MINPACK. 1: 2, 2: 3, 3: 4, 4: 1, 5: 0 # There are 6, 7, 8 for too small tolerance parameters, # but we guard against it by checking ftol, xtol, gtol beforehand. } def call_minpack(fun, x0, jac, ftol, xtol, gtol, max_nfev, x_scale, diff_step): n = x0.size if diff_step is None: epsfcn = EPS else: epsfcn = diff_step**2 # Compute MINPACK's `diag`, which is inverse of our `x_scale` and # ``x_scale='jac'`` corresponds to ``diag=None``. if isinstance(x_scale, str) and x_scale == 'jac': diag = None else: diag = 1 / x_scale full_output = True col_deriv = False factor = 100.0 if jac is None: if max_nfev is None: # n squared to account for Jacobian evaluations. 
max_nfev = 100 * n * (n + 1) x, info, status = _minpack._lmdif( fun, x0, (), full_output, ftol, xtol, gtol, max_nfev, epsfcn, factor, diag) else: if max_nfev is None: max_nfev = 100 * n x, info, status = _minpack._lmder( fun, jac, x0, (), full_output, col_deriv, ftol, xtol, gtol, max_nfev, factor, diag) f = info['fvec'] if callable(jac): J = jac(x) else: J = np.atleast_2d(approx_derivative(fun, x)) cost = 0.5 * np.dot(f, f) g = J.T.dot(f) g_norm = norm(g, ord=np.inf) nfev = info['nfev'] njev = info.get('njev', None) status = FROM_MINPACK_TO_COMMON[status] active_mask = np.zeros_like(x0, dtype=int) return OptimizeResult( x=x, cost=cost, fun=f, jac=J, grad=g, optimality=g_norm, active_mask=active_mask, nfev=nfev, njev=njev, status=status) def prepare_bounds(bounds, n): lb, ub = [np.asarray(b, dtype=float) for b in bounds] if lb.ndim == 0: lb = np.resize(lb, n) if ub.ndim == 0: ub = np.resize(ub, n) return lb, ub def check_tolerance(ftol, xtol, gtol, method): def check(tol, name): if tol is None: tol = 0 elif tol < EPS: warn("Setting `{}` below the machine epsilon ({:.2e}) effectively " "disables the corresponding termination condition." 
.format(name, EPS)) return tol ftol = check(ftol, "ftol") xtol = check(xtol, "xtol") gtol = check(gtol, "gtol") if method == "lm" and (ftol < EPS or xtol < EPS or gtol < EPS): raise ValueError("All tolerances must be higher than machine epsilon " "({:.2e}) for method 'lm'.".format(EPS)) elif ftol < EPS and xtol < EPS and gtol < EPS: raise ValueError("At least one of the tolerances must be higher than " "machine epsilon ({:.2e}).".format(EPS)) return ftol, xtol, gtol def check_x_scale(x_scale, x0): if isinstance(x_scale, str) and x_scale == 'jac': return x_scale try: x_scale = np.asarray(x_scale, dtype=float) valid = np.all(np.isfinite(x_scale)) and np.all(x_scale > 0) except (ValueError, TypeError): valid = False if not valid: raise ValueError("`x_scale` must be 'jac' or array_like with " "positive numbers.") if x_scale.ndim == 0: x_scale = np.resize(x_scale, x0.shape) if x_scale.shape != x0.shape: raise ValueError("Inconsistent shapes between `x_scale` and `x0`.") return x_scale def check_jac_sparsity(jac_sparsity, m, n): if jac_sparsity is None: return None if not issparse(jac_sparsity): jac_sparsity = np.atleast_2d(jac_sparsity) if jac_sparsity.shape != (m, n): raise ValueError("`jac_sparsity` has wrong shape.") return jac_sparsity, group_columns(jac_sparsity) # Loss functions. 
def huber(z, rho, cost_only): mask = z <= 1 rho[0, mask] = z[mask] rho[0, ~mask] = 2 * z[~mask]**0.5 - 1 if cost_only: return rho[1, mask] = 1 rho[1, ~mask] = z[~mask]**-0.5 rho[2, mask] = 0 rho[2, ~mask] = -0.5 * z[~mask]**-1.5 def soft_l1(z, rho, cost_only): t = 1 + z rho[0] = 2 * (t**0.5 - 1) if cost_only: return rho[1] = t**-0.5 rho[2] = -0.5 * t**-1.5 def cauchy(z, rho, cost_only): rho[0] = np.log1p(z) if cost_only: return t = 1 + z rho[1] = 1 / t rho[2] = -1 / t**2 def arctan(z, rho, cost_only): rho[0] = np.arctan(z) if cost_only: return t = 1 + z**2 rho[1] = 1 / t rho[2] = -2 * z / t**2 IMPLEMENTED_LOSSES = dict(linear=None, huber=huber, soft_l1=soft_l1, cauchy=cauchy, arctan=arctan) def construct_loss_function(m, loss, f_scale): if loss == 'linear': return None if not callable(loss): loss = IMPLEMENTED_LOSSES[loss] rho = np.empty((3, m)) def loss_function(f, cost_only=False): z = (f / f_scale) ** 2 loss(z, rho, cost_only=cost_only) if cost_only: return 0.5 * f_scale ** 2 * np.sum(rho[0]) rho[0] *= f_scale ** 2 rho[2] /= f_scale ** 2 return rho else: def loss_function(f, cost_only=False): z = (f / f_scale) ** 2 rho = loss(z) if cost_only: return 0.5 * f_scale ** 2 * np.sum(rho[0]) rho[0] *= f_scale ** 2 rho[2] /= f_scale ** 2 return rho return loss_function def least_squares( fun, x0, jac='2-point', bounds=(-np.inf, np.inf), method='trf', ftol=1e-8, xtol=1e-8, gtol=1e-8, x_scale=1.0, loss='linear', f_scale=1.0, diff_step=None, tr_solver=None, tr_options={}, jac_sparsity=None, max_nfev=None, verbose=0, args=(), kwargs={}): """Solve a nonlinear least-squares problem with bounds on the variables. Given the residuals f(x) (an m-D real function of n real variables) and the loss function rho(s) (a scalar function), `least_squares` finds a local minimum of the cost function F(x):: minimize F(x) = 0.5 * sum(rho(f_i(x)**2), i = 0, ..., m - 1) subject to lb <= x <= ub The purpose of the loss function rho(s) is to reduce the influence of outliers on the solution. 
Parameters ---------- fun : callable Function which computes the vector of residuals, with the signature ``fun(x, *args, **kwargs)``, i.e., the minimization proceeds with respect to its first argument. The argument ``x`` passed to this function is an ndarray of shape (n,) (never a scalar, even for n=1). It must allocate and return a 1-D array_like of shape (m,) or a scalar. If the argument ``x`` is complex or the function ``fun`` returns complex residuals, it must be wrapped in a real function of real arguments, as shown at the end of the Examples section. x0 : array_like with shape (n,) or float Initial guess on independent variables. If float, it will be treated as a 1-D array with one element. jac : {'2-point', '3-point', 'cs', callable}, optional Method of computing the Jacobian matrix (an m-by-n matrix, where element (i, j) is the partial derivative of f[i] with respect to x[j]). The keywords select a finite difference scheme for numerical estimation. The scheme '3-point' is more accurate, but requires twice as many operations as '2-point' (default). The scheme 'cs' uses complex steps, and while potentially the most accurate, it is applicable only when `fun` correctly handles complex inputs and can be analytically continued to the complex plane. Method 'lm' always uses the '2-point' scheme. If callable, it is used as ``jac(x, *args, **kwargs)`` and should return a good approximation (or the exact value) for the Jacobian as an array_like (np.atleast_2d is applied), a sparse matrix (csr_matrix preferred for performance) or a `scipy.sparse.linalg.LinearOperator`. bounds : 2-tuple of array_like, optional Lower and upper bounds on independent variables. Defaults to no bounds. Each array must match the size of `x0` or be a scalar, in the latter case a bound will be the same for all variables. Use ``np.inf`` with an appropriate sign to disable bounds on all or some variables. method : {'trf', 'dogbox', 'lm'}, optional Algorithm to perform minimization. 
* 'trf' : Trust Region Reflective algorithm, particularly suitable for large sparse problems with bounds. Generally robust method. * 'dogbox' : dogleg algorithm with rectangular trust regions, typical use case is small problems with bounds. Not recommended for problems with rank-deficient Jacobian. * 'lm' : Levenberg-Marquardt algorithm as implemented in MINPACK. Doesn't handle bounds and sparse Jacobians. Usually the most efficient method for small unconstrained problems. Default is 'trf'. See Notes for more information. ftol : float or None, optional Tolerance for termination by the change of the cost function. Default is 1e-8. The optimization process is stopped when ``dF < ftol * F``, and there was an adequate agreement between a local quadratic model and the true model in the last step. If None and 'method' is not 'lm', the termination by this condition is disabled. If 'method' is 'lm', this tolerance must be higher than machine epsilon. xtol : float or None, optional Tolerance for termination by the change of the independent variables. Default is 1e-8. The exact condition depends on the `method` used: * For 'trf' and 'dogbox' : ``norm(dx) < xtol * (xtol + norm(x))``. * For 'lm' : ``Delta < xtol * norm(xs)``, where ``Delta`` is a trust-region radius and ``xs`` is the value of ``x`` scaled according to `x_scale` parameter (see below). If None and 'method' is not 'lm', the termination by this condition is disabled. If 'method' is 'lm', this tolerance must be higher than machine epsilon. gtol : float or None, optional Tolerance for termination by the norm of the gradient. Default is 1e-8. The exact condition depends on a `method` used: * For 'trf' : ``norm(g_scaled, ord=np.inf) < gtol``, where ``g_scaled`` is the value of the gradient scaled to account for the presence of the bounds [STIR]_. * For 'dogbox' : ``norm(g_free, ord=np.inf) < gtol``, where ``g_free`` is the gradient with respect to the variables which are not in the optimal state on the boundary. 
* For 'lm' : the maximum absolute value of the cosine of angles between columns of the Jacobian and the residual vector is less than `gtol`, or the residual vector is zero. If None and 'method' is not 'lm', the termination by this condition is disabled. If 'method' is 'lm', this tolerance must be higher than machine epsilon. x_scale : array_like or 'jac', optional Characteristic scale of each variable. Setting `x_scale` is equivalent to reformulating the problem in scaled variables ``xs = x / x_scale``. An alternative view is that the size of a trust region along jth dimension is proportional to ``x_scale[j]``. Improved convergence may be achieved by setting `x_scale` such that a step of a given size along any of the scaled variables has a similar effect on the cost function. If set to 'jac', the scale is iteratively updated using the inverse norms of the columns of the Jacobian matrix (as described in [JJMore]_). loss : str or callable, optional Determines the loss function. The following keyword values are allowed: * 'linear' (default) : ``rho(z) = z``. Gives a standard least-squares problem. * 'soft_l1' : ``rho(z) = 2 * ((1 + z)**0.5 - 1)``. The smooth approximation of l1 (absolute value) loss. Usually a good choice for robust least squares. * 'huber' : ``rho(z) = z if z <= 1 else 2*z**0.5 - 1``. Works similarly to 'soft_l1'. * 'cauchy' : ``rho(z) = ln(1 + z)``. Severely weakens outliers influence, but may cause difficulties in optimization process. * 'arctan' : ``rho(z) = arctan(z)``. Limits a maximum loss on a single residual, has properties similar to 'cauchy'. If callable, it must take a 1-D ndarray ``z=f**2`` and return an array_like with shape (3, m) where row 0 contains function values, row 1 contains first derivatives and row 2 contains second derivatives. Method 'lm' supports only 'linear' loss. f_scale : float, optional Value of soft margin between inlier and outlier residuals, default is 1.0. 
The loss function is evaluated as follows ``rho_(f**2) = C**2 * rho(f**2 / C**2)``, where ``C`` is `f_scale`, and ``rho`` is determined by `loss` parameter. This parameter has no effect with ``loss='linear'``, but for other `loss` values it is of crucial importance. max_nfev : None or int, optional Maximum number of function evaluations before the termination. If None (default), the value is chosen automatically: * For 'trf' and 'dogbox' : 100 * n. * For 'lm' : 100 * n if `jac` is callable and 100 * n * (n + 1) otherwise (because 'lm' counts function calls in Jacobian estimation). diff_step : None or array_like, optional Determines the relative step size for the finite difference approximation of the Jacobian. The actual step is computed as ``x * diff_step``. If None (default), then `diff_step` is taken to be a conventional "optimal" power of machine epsilon for the finite difference scheme used [NR]_. tr_solver : {None, 'exact', 'lsmr'}, optional Method for solving trust-region subproblems, relevant only for 'trf' and 'dogbox' methods. * 'exact' is suitable for not very large problems with dense Jacobian matrices. The computational complexity per iteration is comparable to a singular value decomposition of the Jacobian matrix. * 'lsmr' is suitable for problems with sparse and large Jacobian matrices. It uses the iterative procedure `scipy.sparse.linalg.lsmr` for finding a solution of a linear least-squares problem and only requires matrix-vector product evaluations. If None (default), the solver is chosen based on the type of Jacobian returned on the first iteration. tr_options : dict, optional Keyword options passed to trust-region solver. * ``tr_solver='exact'``: `tr_options` are ignored. * ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`. 
Additionally, ``method='trf'`` supports 'regularize' option (bool, default is True), which adds a regularization term to the normal equation, which improves convergence if the Jacobian is rank-deficient [Byrd]_ (eq. 3.4). jac_sparsity : {None, array_like, sparse matrix}, optional Defines the sparsity structure of the Jacobian matrix for finite difference estimation, its shape must be (m, n). If the Jacobian has only few non-zero elements in *each* row, providing the sparsity structure will greatly speed up the computations [Curtis]_. A zero entry means that a corresponding element in the Jacobian is identically zero. If provided, forces the use of 'lsmr' trust-region solver. If None (default), then dense differencing will be used. Has no effect for 'lm' method. verbose : {0, 1, 2}, optional Level of algorithm's verbosity: * 0 (default) : work silently. * 1 : display a termination report. * 2 : display progress during iterations (not supported by 'lm' method). args, kwargs : tuple and dict, optional Additional arguments passed to `fun` and `jac`. Both empty by default. The calling signature is ``fun(x, *args, **kwargs)`` and the same for `jac`. Returns ------- result : OptimizeResult `OptimizeResult` with the following fields defined: x : ndarray, shape (n,) Solution found. cost : float Value of the cost function at the solution. fun : ndarray, shape (m,) Vector of residuals at the solution. jac : ndarray, sparse matrix or LinearOperator, shape (m, n) Modified Jacobian matrix at the solution, in the sense that J^T J is a Gauss-Newton approximation of the Hessian of the cost function. The type is the same as the one used by the algorithm. grad : ndarray, shape (m,) Gradient of the cost function at the solution. optimality : float First-order optimality measure. In unconstrained problems, it is always the uniform norm of the gradient. In constrained problems, it is the quantity which was compared with `gtol` during iterations. 
active_mask : ndarray of int, shape (n,) Each component shows whether a corresponding constraint is active (that is, whether a variable is at the bound): * 0 : a constraint is not active. * -1 : a lower bound is active. * 1 : an upper bound is active. Might be somewhat arbitrary for 'trf' method as it generates a sequence of strictly feasible iterates and `active_mask` is determined within a tolerance threshold. nfev : int Number of function evaluations done. Methods 'trf' and 'dogbox' do not count function calls for numerical Jacobian approximation, as opposed to 'lm' method. njev : int or None Number of Jacobian evaluations done. If numerical Jacobian approximation is used in 'lm' method, it is set to None. status : int The reason for algorithm termination: * -1 : improper input parameters status returned from MINPACK. * 0 : the maximum number of function evaluations is exceeded. * 1 : `gtol` termination condition is satisfied. * 2 : `ftol` termination condition is satisfied. * 3 : `xtol` termination condition is satisfied. * 4 : Both `ftol` and `xtol` termination conditions are satisfied. message : str Verbal description of the termination reason. success : bool True if one of the convergence criteria is satisfied (`status` > 0). See Also -------- leastsq : A legacy wrapper for the MINPACK implementation of the Levenberg-Marquadt algorithm. curve_fit : Least-squares minimization applied to a curve-fitting problem. Notes ----- Method 'lm' (Levenberg-Marquardt) calls a wrapper over least-squares algorithms implemented in MINPACK (lmder, lmdif). It runs the Levenberg-Marquardt algorithm formulated as a trust-region type algorithm. The implementation is based on paper [JJMore]_, it is very robust and efficient with a lot of smart tricks. It should be your first choice for unconstrained problems. Note that it doesn't support bounds. Also, it doesn't work when m < n. 
Method 'trf' (Trust Region Reflective) is motivated by the process of solving a system of equations, which constitute the first-order optimality condition for a bound-constrained minimization problem as formulated in [STIR]_. The algorithm iteratively solves trust-region subproblems augmented by a special diagonal quadratic term and with trust-region shape determined by the distance from the bounds and the direction of the gradient. This enhancements help to avoid making steps directly into bounds and efficiently explore the whole space of variables. To further improve convergence, the algorithm considers search directions reflected from the bounds. To obey theoretical requirements, the algorithm keeps iterates strictly feasible. With dense Jacobians trust-region subproblems are solved by an exact method very similar to the one described in [JJMore]_ (and implemented in MINPACK). The difference from the MINPACK implementation is that a singular value decomposition of a Jacobian matrix is done once per iteration, instead of a QR decomposition and series of Givens rotation eliminations. For large sparse Jacobians a 2-D subspace approach of solving trust-region subproblems is used [STIR]_, [Byrd]_. The subspace is spanned by a scaled gradient and an approximate Gauss-Newton solution delivered by `scipy.sparse.linalg.lsmr`. When no constraints are imposed the algorithm is very similar to MINPACK and has generally comparable performance. The algorithm works quite robust in unbounded and bounded problems, thus it is chosen as a default algorithm. Method 'dogbox' operates in a trust-region framework, but considers rectangular trust regions as opposed to conventional ellipsoids [Voglis]_. The intersection of a current trust region and initial bounds is again rectangular, so on each iteration a quadratic minimization problem subject to bound constraints is solved approximately by Powell's dogleg method [NumOpt]_. 
The required Gauss-Newton step can be computed exactly for dense Jacobians or approximately by `scipy.sparse.linalg.lsmr` for large sparse Jacobians. The algorithm is likely to exhibit slow convergence when the rank of Jacobian is less than the number of variables. The algorithm often outperforms 'trf' in bounded problems with a small number of variables. Robust loss functions are implemented as described in [BA]_. The idea is to modify a residual vector and a Jacobian matrix on each iteration such that computed gradient and Gauss-Newton Hessian approximation match the true gradient and Hessian approximation of the cost function. Then the algorithm proceeds in a normal way, i.e., robust loss functions are implemented as a simple wrapper over standard least-squares algorithms. .. versionadded:: 0.17.0 References ---------- .. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior, and Conjugate Gradient Method for Large-Scale Bound-Constrained Minimization Problems," SIAM Journal on Scientific Computing, Vol. 21, Number 1, pp 1-23, 1999. .. [NR] William H. Press et. al., "Numerical Recipes. The Art of Scientific Computing. 3rd edition", Sec. 5.7. .. [Byrd] R. H. Byrd, R. B. Schnabel and G. A. Shultz, "Approximate solution of the trust region problem by minimization over two-dimensional subspaces", Math. Programming, 40, pp. 247-263, 1988. .. [Curtis] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of sparse Jacobian matrices", Journal of the Institute of Mathematics and its Applications, 13, pp. 117-120, 1974. .. [JJMore] J. J. More, "The Levenberg-Marquardt Algorithm: Implementation and Theory," Numerical Analysis, ed. G. A. Watson, Lecture Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977. .. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region Dogleg Approach for Unconstrained and Bound Constrained Nonlinear Optimization", WSEAS International Conference on Applied Mathematics, Corfu, Greece, 2004. .. [NumOpt] J. 
Nocedal and S. J. Wright, "Numerical optimization, 2nd edition", Chapter 4. .. [BA] B. Triggs et. al., "Bundle Adjustment - A Modern Synthesis", Proceedings of the International Workshop on Vision Algorithms: Theory and Practice, pp. 298-372, 1999. Examples -------- In this example we find a minimum of the Rosenbrock function without bounds on independent variables. >>> def fun_rosenbrock(x): ... return np.array([10 * (x[1] - x[0]**2), (1 - x[0])]) Notice that we only provide the vector of the residuals. The algorithm constructs the cost function as a sum of squares of the residuals, which gives the Rosenbrock function. The exact minimum is at ``x = [1.0, 1.0]``. >>> from scipy.optimize import least_squares >>> x0_rosenbrock = np.array([2, 2]) >>> res_1 = least_squares(fun_rosenbrock, x0_rosenbrock) >>> res_1.x array([ 1., 1.]) >>> res_1.cost 9.8669242910846867e-30 >>> res_1.optimality 8.8928864934219529e-14 We now constrain the variables, in such a way that the previous solution becomes infeasible. Specifically, we require that ``x[1] >= 1.5``, and ``x[0]`` left unconstrained. To this end, we specify the `bounds` parameter to `least_squares` in the form ``bounds=([-np.inf, 1.5], np.inf)``. We also provide the analytic Jacobian: >>> def jac_rosenbrock(x): ... return np.array([ ... [-20 * x[0], 10], ... [-1, 0]]) Putting this all together, we see that the new solution lies on the bound: >>> res_2 = least_squares(fun_rosenbrock, x0_rosenbrock, jac_rosenbrock, ... bounds=([-np.inf, 1.5], np.inf)) >>> res_2.x array([ 1.22437075, 1.5 ]) >>> res_2.cost 0.025213093946805685 >>> res_2.optimality 1.5885401433157753e-07 Now we solve a system of equations (i.e., the cost function should be zero at a minimum) for a Broyden tridiagonal vector-valued function of 100000 variables: >>> def fun_broyden(x): ... f = (3 - x) * x + 1 ... f[1:] -= x[:-1] ... f[:-1] -= 2 * x[1:] ... return f The corresponding Jacobian matrix is sparse. 
We tell the algorithm to estimate it by finite differences and provide the sparsity structure of Jacobian to significantly speed up this process. >>> from scipy.sparse import lil_matrix >>> def sparsity_broyden(n): ... sparsity = lil_matrix((n, n), dtype=int) ... i = np.arange(n) ... sparsity[i, i] = 1 ... i = np.arange(1, n) ... sparsity[i, i - 1] = 1 ... i = np.arange(n - 1) ... sparsity[i, i + 1] = 1 ... return sparsity ... >>> n = 100000 >>> x0_broyden = -np.ones(n) ... >>> res_3 = least_squares(fun_broyden, x0_broyden, ... jac_sparsity=sparsity_broyden(n)) >>> res_3.cost 4.5687069299604613e-23 >>> res_3.optimality 1.1650454296851518e-11 Let's also solve a curve fitting problem using robust loss function to take care of outliers in the data. Define the model function as ``y = a + b * exp(c * t)``, where t is a predictor variable, y is an observation and a, b, c are parameters to estimate. First, define the function which generates the data with noise and outliers, define the model parameters, and generate data: >>> from numpy.random import default_rng >>> rng = default_rng() >>> def gen_data(t, a, b, c, noise=0., n_outliers=0, seed=None): ... rng = default_rng(seed) ... ... y = a + b * np.exp(t * c) ... ... error = noise * rng.standard_normal(t.size) ... outliers = rng.integers(0, t.size, n_outliers) ... error[outliers] *= 10 ... ... return y + error ... >>> a = 0.5 >>> b = 2.0 >>> c = -1 >>> t_min = 0 >>> t_max = 10 >>> n_points = 15 ... >>> t_train = np.linspace(t_min, t_max, n_points) >>> y_train = gen_data(t_train, a, b, c, noise=0.1, n_outliers=3) Define function for computing residuals and initial estimate of parameters. >>> def fun(x, t, y): ... return x[0] + x[1] * np.exp(x[2] * t) - y ... >>> x0 = np.array([1.0, 1.0, 0.0]) Compute a standard least-squares solution: >>> res_lsq = least_squares(fun, x0, args=(t_train, y_train)) Now compute two solutions with two different robust loss functions. 
The parameter `f_scale` is set to 0.1, meaning that inlier residuals should not significantly exceed 0.1 (the noise level used). >>> res_soft_l1 = least_squares(fun, x0, loss='soft_l1', f_scale=0.1, ... args=(t_train, y_train)) >>> res_log = least_squares(fun, x0, loss='cauchy', f_scale=0.1, ... args=(t_train, y_train)) And, finally, plot all the curves. We see that by selecting an appropriate `loss` we can get estimates close to optimal even in the presence of strong outliers. But keep in mind that generally it is recommended to try 'soft_l1' or 'huber' losses first (if at all necessary) as the other two options may cause difficulties in optimization process. >>> t_test = np.linspace(t_min, t_max, n_points * 10) >>> y_true = gen_data(t_test, a, b, c) >>> y_lsq = gen_data(t_test, *res_lsq.x) >>> y_soft_l1 = gen_data(t_test, *res_soft_l1.x) >>> y_log = gen_data(t_test, *res_log.x) ... >>> import matplotlib.pyplot as plt >>> plt.plot(t_train, y_train, 'o') >>> plt.plot(t_test, y_true, 'k', linewidth=2, label='true') >>> plt.plot(t_test, y_lsq, label='linear loss') >>> plt.plot(t_test, y_soft_l1, label='soft_l1 loss') >>> plt.plot(t_test, y_log, label='cauchy loss') >>> plt.xlabel("t") >>> plt.ylabel("y") >>> plt.legend() >>> plt.show() In the next example, we show how complex-valued residual functions of complex variables can be optimized with ``least_squares()``. Consider the following function: >>> def f(z): ... return z - (0.5 + 0.5j) We wrap it into a function of real variables that returns real residuals by simply handling the real and imaginary parts as independent variables: >>> def f_wrap(x): ... fx = f(x[0] + 1j*x[1]) ... 
return np.array([fx.real, fx.imag]) Thus, instead of the original m-D complex function of n complex variables we optimize a 2m-D real function of 2n real variables: >>> from scipy.optimize import least_squares >>> res_wrapped = least_squares(f_wrap, (0.1, 0.1), bounds=([0, 0], [1, 1])) >>> z = res_wrapped.x[0] + res_wrapped.x[1]*1j >>> z (0.49999999999925893+0.49999999999925893j) """ if method not in ['trf', 'dogbox', 'lm']: raise ValueError("`method` must be 'trf', 'dogbox' or 'lm'.") if jac not in ['2-point', '3-point', 'cs'] and not callable(jac): raise ValueError("`jac` must be '2-point', '3-point', 'cs' or " "callable.") if tr_solver not in [None, 'exact', 'lsmr']: raise ValueError("`tr_solver` must be None, 'exact' or 'lsmr'.") if loss not in IMPLEMENTED_LOSSES and not callable(loss): raise ValueError("`loss` must be one of {0} or a callable." .format(IMPLEMENTED_LOSSES.keys())) if method == 'lm' and loss != 'linear': raise ValueError("method='lm' supports only 'linear' loss function.") if verbose not in [0, 1, 2]: raise ValueError("`verbose` must be in [0, 1, 2].") if len(bounds) != 2: raise ValueError("`bounds` must contain 2 elements.") if max_nfev is not None and max_nfev <= 0: raise ValueError("`max_nfev` must be None or positive integer.") if np.iscomplexobj(x0): raise ValueError("`x0` must be real.") x0 = np.atleast_1d(x0).astype(float) if x0.ndim > 1: raise ValueError("`x0` must have at most 1 dimension.") lb, ub = prepare_bounds(bounds, x0.shape[0]) if method == 'lm' and not np.all((lb == -np.inf) & (ub == np.inf)): raise ValueError("Method 'lm' doesn't support bounds.") if lb.shape != x0.shape or ub.shape != x0.shape: raise ValueError("Inconsistent shapes between bounds and `x0`.") if np.any(lb >= ub): raise ValueError("Each lower bound must be strictly less than each " "upper bound.") if not in_bounds(x0, lb, ub): raise ValueError("`x0` is infeasible.") x_scale = check_x_scale(x_scale, x0) ftol, xtol, gtol = check_tolerance(ftol, xtol, gtol, 
method) def fun_wrapped(x): return np.atleast_1d(fun(x, *args, **kwargs)) if method == 'trf': x0 = make_strictly_feasible(x0, lb, ub) f0 = fun_wrapped(x0) if f0.ndim != 1: raise ValueError("`fun` must return at most 1-d array_like. " "f0.shape: {0}".format(f0.shape)) if not np.all(np.isfinite(f0)): raise ValueError("Residuals are not finite in the initial point.") n = x0.size m = f0.size if method == 'lm' and m < n: raise ValueError("Method 'lm' doesn't work when the number of " "residuals is less than the number of variables.") loss_function = construct_loss_function(m, loss, f_scale) if callable(loss): rho = loss_function(f0) if rho.shape != (3, m): raise ValueError("The return value of `loss` callable has wrong " "shape.") initial_cost = 0.5 * np.sum(rho[0]) elif loss_function is not None: initial_cost = loss_function(f0, cost_only=True) else: initial_cost = 0.5 * np.dot(f0, f0) if callable(jac): J0 = jac(x0, *args, **kwargs) if issparse(J0): J0 = J0.tocsr() def jac_wrapped(x, _=None): return jac(x, *args, **kwargs).tocsr() elif isinstance(J0, LinearOperator): def jac_wrapped(x, _=None): return jac(x, *args, **kwargs) else: J0 = np.atleast_2d(J0) def jac_wrapped(x, _=None): return np.atleast_2d(jac(x, *args, **kwargs)) else: # Estimate Jacobian by finite differences. if method == 'lm': if jac_sparsity is not None: raise ValueError("method='lm' does not support " "`jac_sparsity`.") if jac != '2-point': warn("jac='{0}' works equivalently to '2-point' " "for method='lm'.".format(jac)) J0 = jac_wrapped = None else: if jac_sparsity is not None and tr_solver == 'exact': raise ValueError("tr_solver='exact' is incompatible " "with `jac_sparsity`.") jac_sparsity = check_jac_sparsity(jac_sparsity, m, n) def jac_wrapped(x, f): J = approx_derivative(fun, x, rel_step=diff_step, method=jac, f0=f, bounds=bounds, args=args, kwargs=kwargs, sparsity=jac_sparsity) if J.ndim != 2: # J is guaranteed not sparse. 
J = np.atleast_2d(J) return J J0 = jac_wrapped(x0, f0) if J0 is not None: if J0.shape != (m, n): raise ValueError( "The return value of `jac` has wrong shape: expected {0}, " "actual {1}.".format((m, n), J0.shape)) if not isinstance(J0, np.ndarray): if method == 'lm': raise ValueError("method='lm' works only with dense " "Jacobian matrices.") if tr_solver == 'exact': raise ValueError( "tr_solver='exact' works only with dense " "Jacobian matrices.") jac_scale = isinstance(x_scale, str) and x_scale == 'jac' if isinstance(J0, LinearOperator) and jac_scale: raise ValueError("x_scale='jac' can't be used when `jac` " "returns LinearOperator.") if tr_solver is None: if isinstance(J0, np.ndarray): tr_solver = 'exact' else: tr_solver = 'lsmr' if method == 'lm': result = call_minpack(fun_wrapped, x0, jac_wrapped, ftol, xtol, gtol, max_nfev, x_scale, diff_step) elif method == 'trf': result = trf(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale, loss_function, tr_solver, tr_options.copy(), verbose) elif method == 'dogbox': if tr_solver == 'lsmr' and 'regularize' in tr_options: warn("The keyword 'regularize' in `tr_options` is not relevant " "for 'dogbox' method.") tr_options = tr_options.copy() del tr_options['regularize'] result = dogbox(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale, loss_function, tr_solver, tr_options, verbose) result.message = TERMINATION_MESSAGES[result.status] result.success = result.status > 0 if verbose >= 1: print(result.message) print("Function evaluations {0}, initial cost {1:.4e}, final cost " "{2:.4e}, first-order optimality {3:.2e}." .format(result.nfev, initial_cost, result.cost, result.optimality)) return result
bsd-3-clause
martinwoodward/coreclr
src/pal/automation/util.py
154
2814
import sys
import getopt
import os
import subprocess
import shutil
import logging as log


def Initialize(platform):
    """Locate the Jenkins workspace and sanitize PATH on Windows.

    Reads the WORKSPACE environment variable (set by Jenkins) and stores it
    in the module-level ``workspace`` global consumed by SetupDirectories.

    :param platform: "windows" or "linux"
    :return: the workspace path
    """
    print("Initializing Workspace")
    global workspace
    workspace = os.environ['WORKSPACE']
    if platform == "windows":
        # Jenkins puts quotes in the path, which is wrong. Remove quotes.
        os.environ['PATH'] = os.environ['PATH'].replace('"', '')
    return workspace


def ParseArgs(argv):
    """Parse the compile script's command line.

    :param argv: argument list (without the program name)
    :return: tuple ``(exitcode, target, platform, arch, cleanUp)`` where
             exitcode is 0 on success and 2 on a usage error.
    """
    print("Parsing arguments for compile")
    try:
        # BUGFIX: "-c" was handled in the loop below but missing from the
        # short-option spec, so passing "-c" used to raise GetoptError.
        opts, args = getopt.getopt(argv, "t:p:a:vc",
                                   ["target=", "platform=", "arch=", "verbose", "noclean"])
    except getopt.GetoptError:
        print("ERROR: \n\t usage: python compile.py --target <target> --platform <windows|linux> --arch <arch> [--verbose] [--noclean]")
        return 2, "", "", "", True

    verbose = False
    cleanUp = True
    # BUGFIX: target/platform/arch were previously unbound when the
    # corresponding flag was omitted, raising UnboundLocalError instead of
    # reaching the intended "must specify" error path below.
    target = ""
    platform = ""
    arch = ""
    acceptedPlatforms = ['windows', 'linux']

    for opt, arg in opts:
        if opt in ("-t", "--target"):
            target = arg
        elif opt in ("-p", "--platform"):
            if arg.lower() not in acceptedPlatforms:
                # BUGFIX: added the missing space before "not".
                print("ERROR: " + arg + " not an accepted platform. Use windows or linux.")
                sys.exit(2)
            platform = arg.lower()
        elif opt in ("-a", "--arch"):
            arch = arg
        elif opt in ("-v", "--verbose"):
            verbose = True
        elif opt in ("-c", "--noclean"):
            cleanUp = False

    if verbose:
        log.basicConfig(format="%(levelname)s: %(message)s", level=log.DEBUG)
        log.info("In verbose mode.")
    else:
        log.basicConfig(format="%(levelname)s: %(message)s")

    if target == "" or platform == "" or arch == "":
        # must specify target, project and arch
        log.error("Must specify target, project and arch")
        return 2, "", "", "", True

    return 0, target, platform, arch, cleanUp


def SetupDirectories(target, arch, platform):
    """Create (or recreate) the build directory tree under the workspace.

    Leaves the process chdir'ed into the per-configuration build directory
    and publishes the ``rootdir``/``builddir`` globals consumed by Cleanup.

    :return: full path of the per-configuration build directory
    """
    log.info("Setting up directories")

    global rootdir
    global builddir
    # BUGFIX: this was declared as "global fullBuildDirPath", which never
    # matched the lowercase name assigned below, so the declaration was dead.
    global fullbuilddirpath

    rootdir = "build"
    if not os.path.isdir(rootdir):
        os.mkdir(rootdir)
    os.chdir(rootdir)

    builddir = "build-" + platform
    if platform == "windows":
        builddir = builddir + "-" + arch + "-" + target
    # Always start from a clean per-configuration directory.
    if os.path.isdir(builddir):
        shutil.rmtree(builddir)
    os.mkdir(builddir)
    os.chdir(builddir)

    fullbuilddirpath = workspace + "/" + rootdir + "/" + builddir

    return fullbuilddirpath


def Cleanup(cleanUp, workspace):
    """Optionally delete the build tree created by SetupDirectories.

    :param cleanUp: when True, remove the build directories
    :param workspace: the Jenkins workspace root
    :return: 0 on success
    """
    print("\n==================================================\n")
    print("Cleaning Up.")
    print("\n==================================================\n")
    if cleanUp:
        os.chdir(workspace + "/" + rootdir)
        shutil.rmtree(builddir)
        os.chdir("..")
        shutil.rmtree(rootdir)
    log.shutdown()
    return 0
mit
seanwestfall/django
django/http/request.py
50
19501
from __future__ import unicode_literals

import copy
import re
import sys
from io import BytesIO
from itertools import chain

from django.conf import settings
from django.core import signing
from django.core.exceptions import DisallowedHost, ImproperlyConfigured
from django.core.files import uploadhandler
from django.http.multipartparser import MultiPartParser, MultiPartParserError
from django.utils import six
from django.utils.datastructures import ImmutableList, MultiValueDict
from django.utils.encoding import (
    escape_uri_path, force_bytes, force_str, force_text, iri_to_uri,
)
from django.utils.six.moves.urllib.parse import (
    parse_qsl, quote, urlencode, urljoin, urlsplit,
)

# Sentinel used by get_signed_cookie() to distinguish "no default supplied"
# from an explicit default of None.
RAISE_ERROR = object()
host_validation_re = re.compile(r"^([a-z0-9.-]+|\[[a-f0-9]*:[a-f0-9:]+\])(:\d+)?$")


class UnreadablePostError(IOError):
    # Raised when reading the request body stream fails (see read()/body).
    pass


class RawPostDataException(Exception):
    """
    You cannot access raw_post_data from a request that has
    multipart/* POST data if it has been accessed via POST,
    FILES, etc..
    """
    pass


class HttpRequest(object):
    """A basic HTTP request."""

    # The encoding used in GET/POST dicts. None means use default setting.
    _encoding = None
    _upload_handlers = []

    def __init__(self):
        # WARNING: The `WSGIRequest` subclass doesn't call `super`.
        # Any variable assignment made here should also happen in
        # `WSGIRequest.__init__()`.
        self.GET = QueryDict(mutable=True)
        self.POST = QueryDict(mutable=True)
        self.COOKIES = {}
        self.META = {}
        self.FILES = MultiValueDict()
        self.path = ''
        self.path_info = ''
        self.method = None
        self.resolver_match = None
        self._post_parse_error = False

    def __repr__(self):
        if self.method is None or not self.get_full_path():
            return force_str('<%s>' % self.__class__.__name__)
        return force_str(
            '<%s: %s %r>' % (self.__class__.__name__, self.method, force_str(self.get_full_path()))
        )

    def get_host(self):
        """Returns the HTTP host using the environment or request headers."""
        # We try three options, in order of decreasing preference.
        if settings.USE_X_FORWARDED_HOST and (
                'HTTP_X_FORWARDED_HOST' in self.META):
            host = self.META['HTTP_X_FORWARDED_HOST']
        elif 'HTTP_HOST' in self.META:
            host = self.META['HTTP_HOST']
        else:
            # Reconstruct the host using the algorithm from PEP 333.
            host = self.META['SERVER_NAME']
            server_port = str(self.META['SERVER_PORT'])
            if server_port != ('443' if self.is_secure() else '80'):
                host = '%s:%s' % (host, server_port)

        # There is no hostname validation when DEBUG=True
        if settings.DEBUG:
            return host

        domain, port = split_domain_port(host)
        if domain and validate_host(domain, settings.ALLOWED_HOSTS):
            return host
        else:
            msg = "Invalid HTTP_HOST header: %r." % host
            if domain:
                msg += " You may need to add %r to ALLOWED_HOSTS." % domain
            else:
                msg += " The domain name provided is not valid according to RFC 1034/1035."
            raise DisallowedHost(msg)

    def get_full_path(self, force_append_slash=False):
        # RFC 3986 requires query string arguments to be in the ASCII range.
        # Rather than crash if this doesn't happen, we encode defensively.
        return '%s%s%s' % (
            escape_uri_path(self.path),
            '/' if force_append_slash and not self.path.endswith('/') else '',
            ('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) if self.META.get('QUERY_STRING', '') else ''
        )

    def get_signed_cookie(self, key, default=RAISE_ERROR, salt='', max_age=None):
        """
        Attempts to return a signed cookie. If the signature fails or the
        cookie has expired, raises an exception... unless you provide the
        default argument in which case that value will be returned instead.
        """
        try:
            cookie_value = self.COOKIES[key]
        except KeyError:
            if default is not RAISE_ERROR:
                return default
            else:
                raise
        try:
            value = signing.get_cookie_signer(salt=key + salt).unsign(
                cookie_value, max_age=max_age)
        except signing.BadSignature:
            if default is not RAISE_ERROR:
                return default
            else:
                raise
        return value

    def build_absolute_uri(self, location=None):
        """
        Builds an absolute URI from the location and the variables available in
        this request. If no ``location`` is specified, the absolute URI is
        built on ``request.get_full_path()``. Anyway, if the location is
        absolute, it is simply converted to an RFC 3987 compliant URI and
        returned and if location is relative or is scheme-relative (i.e.,
        ``//example.com/``), it is urljoined to a base URL constructed from the
        request variables.
        """
        if location is None:
            # Make it an absolute url (but schemeless and domainless) for the
            # edge case that the path starts with '//'.
            location = '//%s' % self.get_full_path()
        bits = urlsplit(location)
        if not (bits.scheme and bits.netloc):
            current_uri = '{scheme}://{host}{path}'.format(scheme=self.scheme,
                                                           host=self.get_host(),
                                                           path=self.path)
            # Join the constructed URL with the provided location, which will
            # allow the provided ``location`` to apply query strings to the
            # base path as well as override the host, if it begins with //
            location = urljoin(current_uri, location)
        return iri_to_uri(location)

    def _get_scheme(self):
        """
        Hook for subclasses like WSGIRequest to implement. Returns 'http' by
        default.
        """
        return 'http'

    @property
    def scheme(self):
        # A trusted proxy header (if configured) can mark the request as
        # HTTPS even though the local connection is plain HTTP.
        if settings.SECURE_PROXY_SSL_HEADER:
            try:
                header, value = settings.SECURE_PROXY_SSL_HEADER
            except ValueError:
                raise ImproperlyConfigured(
                    'The SECURE_PROXY_SSL_HEADER setting must be a tuple containing two values.'
                )
            if self.META.get(header) == value:
                return 'https'
        return self._get_scheme()

    def is_secure(self):
        # True when the effective scheme (possibly proxy-supplied) is HTTPS.
        return self.scheme == 'https'

    def is_ajax(self):
        # Header set by jQuery and most JS libraries on XMLHttpRequest calls.
        return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'

    @property
    def encoding(self):
        return self._encoding

    @encoding.setter
    def encoding(self, val):
        """
        Sets the encoding used for GET/POST accesses. If the GET or POST
        dictionary has already been created, it is removed and recreated on the
        next access (so that it is decoded correctly).
        """
        self._encoding = val
        if hasattr(self, '_get'):
            del self._get
        if hasattr(self, '_post'):
            del self._post

    def _initialize_handlers(self):
        # Build the upload handler chain from settings.FILE_UPLOAD_HANDLERS.
        self._upload_handlers = [uploadhandler.load_handler(handler, self)
                                 for handler in settings.FILE_UPLOAD_HANDLERS]

    @property
    def upload_handlers(self):
        if not self._upload_handlers:
            # If there are no upload handlers defined, initialize them from settings.
            self._initialize_handlers()
        return self._upload_handlers

    @upload_handlers.setter
    def upload_handlers(self, upload_handlers):
        if hasattr(self, '_files'):
            raise AttributeError("You cannot set the upload handlers after the upload has been processed.")
        self._upload_handlers = upload_handlers

    def parse_file_upload(self, META, post_data):
        """Returns a tuple of (POST QueryDict, FILES MultiValueDict)."""
        self.upload_handlers = ImmutableList(
            self.upload_handlers,
            warning="You cannot alter upload handlers after the upload has been processed."
        )
        parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
        return parser.parse()

    @property
    def body(self):
        # Lazily read and cache the raw request body; once the stream has
        # been consumed some other way, raw access is no longer possible.
        if not hasattr(self, '_body'):
            if self._read_started:
                raise RawPostDataException("You cannot access body after reading from request's data stream")
            try:
                self._body = self.read()
            except IOError as e:
                six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
            self._stream = BytesIO(self._body)
        return self._body

    def _mark_post_parse_error(self):
        self._post = QueryDict('')
        self._files = MultiValueDict()
        self._post_parse_error = True

    def _load_post_and_files(self):
        """Populate self._post and self._files if the content-type is a form type"""
        if self.method != 'POST':
            self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict()
            return
        if self._read_started and not hasattr(self, '_body'):
            self._mark_post_parse_error()
            return

        if self.META.get('CONTENT_TYPE', '').startswith('multipart/form-data'):
            if hasattr(self, '_body'):
                # Use already read data
                data = BytesIO(self._body)
            else:
                data = self
            try:
                self._post, self._files = self.parse_file_upload(self.META, data)
            except MultiPartParserError:
                # An error occurred while parsing POST data. Since when
                # formatting the error the request handler might access
                # self.POST, set self._post and self._file to prevent
                # attempts to parse POST data again.
                # Mark that an error occurred. This allows self.__repr__ to
                # be explicit about it instead of simply representing an
                # empty POST
                self._mark_post_parse_error()
                raise
        elif self.META.get('CONTENT_TYPE', '').startswith('application/x-www-form-urlencoded'):
            self._post, self._files = QueryDict(self.body, encoding=self._encoding), MultiValueDict()
        else:
            self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict()

    def close(self):
        if hasattr(self, '_files'):
            for f in chain.from_iterable(l[1] for l in self._files.lists()):
                f.close()

    # File-like and iterator interface.
    #
    # Expects self._stream to be set to an appropriate source of bytes by
    # a corresponding request subclass (e.g. WSGIRequest).
    # Also when request data has already been read by request.POST or
    # request.body, self._stream points to a BytesIO instance
    # containing that data.

    def read(self, *args, **kwargs):
        self._read_started = True
        try:
            return self._stream.read(*args, **kwargs)
        except IOError as e:
            six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])

    def readline(self, *args, **kwargs):
        self._read_started = True
        try:
            return self._stream.readline(*args, **kwargs)
        except IOError as e:
            six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])

    def xreadlines(self):
        # Generator yielding one line at a time until the stream is exhausted.
        while True:
            buf = self.readline()
            if not buf:
                break
            yield buf

    __iter__ = xreadlines

    def readlines(self):
        return list(iter(self))


class QueryDict(MultiValueDict):
    """
    A specialized MultiValueDict which represents a query string.

    A QueryDict can be used to represent GET or POST data. It subclasses
    MultiValueDict since keys in such data can be repeated, for instance
    in the data from a form with a <select multiple> field.

    By default QueryDicts are immutable, though the copy() method
    will always return a mutable copy.

    Both keys and values set on this class are converted from the given
    encoding (DEFAULT_CHARSET by default) to unicode.
    """

    # These are both reset in __init__, but is specified here at the class
    # level so that unpickling will have valid values
    _mutable = True
    _encoding = None

    def __init__(self, query_string=None, mutable=False, encoding=None):
        super(QueryDict, self).__init__()
        if not encoding:
            encoding = settings.DEFAULT_CHARSET
        self.encoding = encoding
        if six.PY3:
            if isinstance(query_string, bytes):
                # query_string normally contains URL-encoded data, a subset of ASCII.
                try:
                    query_string = query_string.decode(encoding)
                except UnicodeDecodeError:
                    # ... but some user agents are misbehaving :-(
                    query_string = query_string.decode('iso-8859-1')
            for key, value in parse_qsl(query_string or '',
                                        keep_blank_values=True,
                                        encoding=encoding):
                self.appendlist(key, value)
        else:
            for key, value in parse_qsl(query_string or '',
                                        keep_blank_values=True):
                try:
                    value = value.decode(encoding)
                except UnicodeDecodeError:
                    value = value.decode('iso-8859-1')
                self.appendlist(force_text(key, encoding, errors='replace'),
                                value)
        self._mutable = mutable

    @property
    def encoding(self):
        if self._encoding is None:
            self._encoding = settings.DEFAULT_CHARSET
        return self._encoding

    @encoding.setter
    def encoding(self, value):
        self._encoding = value

    def _assert_mutable(self):
        # Guard called by every mutating method below.
        if not self._mutable:
            raise AttributeError("This QueryDict instance is immutable")

    def __setitem__(self, key, value):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        value = bytes_to_text(value, self.encoding)
        super(QueryDict, self).__setitem__(key, value)

    def __delitem__(self, key):
        self._assert_mutable()
        super(QueryDict, self).__delitem__(key)

    def __copy__(self):
        result = self.__class__('', mutable=True, encoding=self.encoding)
        for key, value in six.iterlists(self):
            result.setlist(key, value)
        return result

    def __deepcopy__(self, memo):
        result = self.__class__('', mutable=True, encoding=self.encoding)
        memo[id(self)] = result
        for key, value in six.iterlists(self):
            result.setlist(copy.deepcopy(key, memo), copy.deepcopy(value, memo))
        return result

    def setlist(self, key, list_):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        list_ = [bytes_to_text(elt, self.encoding) for elt in list_]
        super(QueryDict, self).setlist(key, list_)

    def setlistdefault(self, key, default_list=None):
        self._assert_mutable()
        return super(QueryDict, self).setlistdefault(key, default_list)

    def appendlist(self, key, value):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        value = bytes_to_text(value, self.encoding)
        super(QueryDict, self).appendlist(key, value)

    def pop(self, key, *args):
        self._assert_mutable()
        return super(QueryDict, self).pop(key, *args)

    def popitem(self):
        self._assert_mutable()
        return super(QueryDict, self).popitem()

    def clear(self):
        self._assert_mutable()
        super(QueryDict, self).clear()

    def setdefault(self, key, default=None):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        default = bytes_to_text(default, self.encoding)
        return super(QueryDict, self).setdefault(key, default)

    def copy(self):
        """Returns a mutable copy of this object."""
        return self.__deepcopy__({})

    def urlencode(self, safe=None):
        """
        Returns an encoded string of all query string arguments.

        :arg safe: Used to specify characters which do not require quoting, for
            example::

                >>> q = QueryDict('', mutable=True)
                >>> q['next'] = '/a&b/'
                >>> q.urlencode()
                'next=%2Fa%26b%2F'
                >>> q.urlencode(safe='/')
                'next=/a%26b/'

        """
        output = []
        if safe:
            safe = force_bytes(safe, self.encoding)
            encode = lambda k, v: '%s=%s' % ((quote(k, safe), quote(v, safe)))
        else:
            encode = lambda k, v: urlencode({k: v})
        for k, list_ in self.lists():
            k = force_bytes(k, self.encoding)
            output.extend(encode(k, force_bytes(v, self.encoding))
                          for v in list_)
        return '&'.join(output)


# It's neither necessary nor appropriate to use
# django.utils.encoding.smart_text for parsing URLs and form inputs. Thus,
# this slightly more restricted function, used by QueryDict.
def bytes_to_text(s, encoding):
    """
    Converts basestring objects to unicode, using the given encoding. Illegally
    encoded input characters are replaced with Unicode "unknown" codepoint
    (\ufffd).

    Returns any non-basestring objects without change.
    """
    if isinstance(s, bytes):
        return six.text_type(s, encoding, 'replace')
    else:
        return s


def split_domain_port(host):
    """
    Return a (domain, port) tuple from a given host.

    Returned domain is lower-cased. If the host is invalid, the domain will be
    empty.
    """
    host = host.lower()

    if not host_validation_re.match(host):
        return '', ''

    if host[-1] == ']':
        # It's an IPv6 address without a port.
        return host, ''
    bits = host.rsplit(':', 1)
    if len(bits) == 2:
        return tuple(bits)
    return bits[0], ''


def validate_host(host, allowed_hosts):
    """
    Validate the given host for this site.

    Check that the host looks valid and matches a host or host pattern in the
    given list of ``allowed_hosts``. Any pattern beginning with a period
    matches a domain and all its subdomains (e.g. ``.example.com`` matches
    ``example.com`` and any subdomain), ``*`` matches anything, and anything
    else must match exactly.

    Note: This function assumes that the given host is lower-cased and has
    already had the port, if any, stripped off.

    Return ``True`` for a valid host, ``False`` otherwise.
    """
    # Strip a single trailing dot (FQDN form) before matching.
    host = host[:-1] if host.endswith('.') else host

    for pattern in allowed_hosts:
        pattern = pattern.lower()
        match = (
            pattern == '*' or
            pattern.startswith('.') and (
                host.endswith(pattern) or host == pattern[1:]
            ) or
            pattern == host
        )
        if match:
            return True

    return False
bsd-3-clause
shubhdev/openedx
common/test/acceptance/pages/studio/html_component_editor.py
115
1139
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains

from component_editor import ComponentEditorView


class HtmlComponentEditorView(ComponentEditorView):
    """
    Page object for the editor view of an HTML component.
    """

    def set_content_and_save(self, content):
        """
        Replace the component's markup with ``content`` and persist it by
        pressing Save.
        """
        self.set_content(content)
        self.save()

    def set_content_and_cancel(self, content):
        """
        Replace the component's markup with ``content`` and then press Cancel
        so the change is discarded.
        """
        self.set_content(content)
        self.cancel()

    def set_content(self, content):
        """
        Select everything in the HTML editor and type ``content`` over it,
        leaving the editor modal open.
        """
        # Switch the modal into the raw HTML editing mode first.
        self.q(css='.edit-xblock-modal .editor-modes .editor-button').click()
        edit_area = self.q(css=self._bounded_selector('.html-editor .mce-edit-area'))[0]
        chain = ActionChains(self.browser).click(edit_area)
        chain = chain.send_keys([Keys.CONTROL, 'a']).key_up(Keys.CONTROL)
        chain.send_keys(content).perform()
agpl-3.0
MERegistro/meregistro
django/views/generic/simple.py
76
1634
from django.template import loader, RequestContext
from django.http import HttpResponse, HttpResponseRedirect, HttpResponsePermanentRedirect, HttpResponseGone


def direct_to_template(request, template, extra_context=None, mimetype=None, **kwargs):
    """
    Render ``template``, exposing any extra URL parameters in the template
    context as ``{{ params }}``.

    Callable values in ``extra_context`` are invoked and their results placed
    in the context; everything else is passed through unchanged.
    """
    context_values = {'params': kwargs}
    if extra_context is not None:
        for key, value in extra_context.items():
            context_values[key] = value() if callable(value) else value
    context = RequestContext(request, context_values)
    template_obj = loader.get_template(template)
    return HttpResponse(template_obj.render(context), mimetype=mimetype)


def redirect_to(request, url, permanent=True, **kwargs):
    """
    Redirect to a given URL.

    The given url may contain dict-style string formatting, which will be
    interpolated against the params in the URL.  For example, to redirect from
    ``/foo/<id>/`` to ``/bar/<id>/``, you could use the following URLconf::

        urlpatterns = patterns('',
            ('^foo/(?P<id>\d+)/$',
             'django.views.generic.simple.redirect_to',
             {'url' : '/bar/%(id)s/'}),
        )

    If the given url is ``None``, a HttpResponseGone (410) will be issued.

    If the ``permanent`` argument is False, then the response will have a 302
    HTTP status code. Otherwise, the status code will be 301.
    """
    # A missing destination means the resource is gone for good.
    if url is None:
        return HttpResponseGone()
    response_class = HttpResponsePermanentRedirect if permanent else HttpResponseRedirect
    return response_class(url % kwargs)
bsd-3-clause
x111ong/django
tests/model_regress/test_pickle.py
271
3804
import datetime
import os
import pickle
import subprocess
import sys
import warnings

from django.core.files.temp import NamedTemporaryFile
from django.db import DJANGO_VERSION_PICKLE_KEY, models
from django.test import TestCase, mock
from django.utils._os import npath, upath
from django.utils.encoding import force_text
from django.utils.version import get_version

from .models import Article


class ModelPickleTestCase(TestCase):
    # Regression tests for (un)pickling model instances: version-stamp
    # warnings (#21430) and unpickling before the app registry is ready
    # (#24007).

    def test_missing_django_version_unpickling(self):
        """
        #21430 -- Verifies a warning is raised for models that are
        unpickled without a Django version
        """
        class MissingDjangoVersion(models.Model):
            title = models.CharField(max_length=10)

            def __reduce__(self):
                # Strip the Django version stamp from the pickled state so
                # unpickling sees a payload with no version information.
                reduce_list = super(MissingDjangoVersion, self).__reduce__()
                data = reduce_list[-1]
                del data[DJANGO_VERSION_PICKLE_KEY]
                return reduce_list

        p = MissingDjangoVersion(title="FooBar")
        with warnings.catch_warnings(record=True) as recorded:
            pickle.loads(pickle.dumps(p))
            msg = force_text(recorded.pop().message)
            self.assertEqual(msg, "Pickled model instance's Django version is not specified.")

    def test_unsupported_unpickle(self):
        """
        #21430 -- Verifies a warning is raised for models that are
        unpickled with a different Django version than the current
        """
        class DifferentDjangoVersion(models.Model):
            title = models.CharField(max_length=10)

            def __reduce__(self):
                # Forge an old version stamp into the pickled state.
                reduce_list = super(DifferentDjangoVersion, self).__reduce__()
                data = reduce_list[-1]
                data[DJANGO_VERSION_PICKLE_KEY] = '1.0'
                return reduce_list

        p = DifferentDjangoVersion(title="FooBar")
        with warnings.catch_warnings(record=True) as recorded:
            pickle.loads(pickle.dumps(p))
            msg = force_text(recorded.pop().message)
            self.assertEqual(
                msg,
                "Pickled model instance's Django version 1.0 does not "
                "match the current version %s." % get_version()
            )

    def test_unpickling_when_appregistrynotready(self):
        """
        #24007 -- Verifies that a pickled model can be unpickled without having
        to manually setup the apps registry beforehand.
        """
        # Standalone script that unpickles the instance in a fresh
        # interpreter, before any explicit app-registry setup.
        script_template = """#!/usr/bin/env python
import pickle

from django.conf import settings

data = %r

settings.configure(DEBUG=False, INSTALLED_APPS=['model_regress'], SECRET_KEY = "blah")

article = pickle.loads(data)
print(article.headline)"""
        a = Article.objects.create(
            headline="Some object",
            pub_date=datetime.datetime.now(),
            article_text="This is an article",
        )

        with NamedTemporaryFile(mode='w+', suffix=".py") as script:
            script.write(script_template % pickle.dumps(a))
            script.flush()

            # A path to model_regress must be set in PYTHONPATH
            model_regress_dir = os.path.dirname(upath(__file__))
            model_regress_path = os.path.abspath(model_regress_dir)
            tests_path = os.path.split(model_regress_path)[0]

            pythonpath = os.environ.get('PYTHONPATH', '')
            pythonpath = npath(os.pathsep.join([tests_path, pythonpath]))

            with mock.patch.dict('os.environ', {'PYTHONPATH': pythonpath}):
                try:
                    result = subprocess.check_output([sys.executable, script.name])
                except subprocess.CalledProcessError:
                    self.fail("Unable to reload model pickled data")
        self.assertEqual(result.strip().decode(), "Some object")
bsd-3-clause
xundaokeji/three.js
utils/exporters/blender/addons/io_three/exporter/base_classes.py
72
3264
from . import utilities
from .. import constants, exceptions


class BaseClass(constants.BASE_DICT):
    """Base class which inherits from a base dictionary object."""

    # Per-class default key/value pairs copied into every new instance.
    _defaults = {}

    def __init__(self, parent=None, type=None):
        # NOTE(review): ``type`` intentionally shadows the builtin here,
        # matching the exporter's existing API; the builtin is still
        # available in the other methods of this class.
        constants.BASE_DICT.__init__(self)
        self._type = type
        self._parent = parent
        constants.BASE_DICT.update(self, self._defaults.copy())
        # Reset the class-level defaults once they have been copied in --
        # presumably so one instance's defaults don't leak into the next.
        # TODO(review): confirm subclasses repopulate _defaults as expected.
        BaseClass._defaults = {}

    def __setitem__(self, key, value):
        # Only whitelisted data types may be stored in the dictionary.
        if not isinstance(value, constants.VALID_DATA_TYPES):
            msg = "Value is an invalid data type: %s" % type(value)
            raise exceptions.ThreeValueError(msg)
        constants.BASE_DICT.__setitem__(self, key, value)

    @property
    def count(self):
        """
        :return: number of keys
        :rtype: int
        """
        return len(self.keys())

    @property
    def parent(self):
        """
        :return: parent object
        """
        return self._parent

    @property
    def type(self):
        """
        :return: the type (if applicable)
        """
        return self._type

    def copy(self):
        """Copies the items to a standard dictionary object.

        :rtype: dict
        """
        data = {}

        def _dict_copy(old, new):
            """Recursive function for processing all values

            :param old:
            :param new:
            """
            for key, value in old.items():
                if isinstance(value, (str, list)):
                    # Slice to get an independent shallow copy.
                    new[key] = value[:]
                elif isinstance(value, tuple):
                    new[key] = value + tuple()
                elif isinstance(value, dict):
                    new[key] = {}
                    _dict_copy(value, new[key])
                else:
                    new[key] = value

        _dict_copy(self, data)

        return data


class BaseNode(BaseClass):
    """Base class for all nodes for the current platform."""

    def __init__(self, node, parent, type):
        BaseClass.__init__(self, parent=parent, type=type)
        self._node = node
        # Anonymous nodes only get a UUID; named nodes record name and UUID.
        if node is None:
            self[constants.UUID] = utilities.id()
        else:
            self[constants.NAME] = node
            self[constants.UUID] = utilities.id()

        # Resolve the owning scene: the direct parent may itself be the
        # scene, otherwise inherit the parent's scene pointer.
        if isinstance(parent, BaseScene):
            scene = parent
        elif parent is not None:
            scene = parent.scene
        else:
            scene = None

        self._scene = scene

    @property
    def node(self):
        """
        :return: name of the node
        """
        return self._node

    @property
    def scene(self):
        """
        :return: returns the scene point
        """
        return self._scene

    @property
    def options(self):
        """
        :return: export options
        :retype: dict
        """
        return self.scene.options


class BaseScene(BaseClass):
    """Base class that scenes inherit from."""

    def __init__(self, filepath, options):
        BaseClass.__init__(self, type=constants.SCENE)
        self._filepath = filepath
        self._options = options.copy()

    @property
    def filepath(self):
        # Destination path this scene is being exported to.
        return self._filepath

    @property
    def options(self):
        # Export options dictionary captured at construction time.
        return self._options
mit
otherness-space/myProject
my_project_001/lib/python2.7/site-packages/django/contrib/flatpages/tests/middleware.py
207
6861
import os from django.conf import settings from django.contrib.auth.models import User from django.contrib.auth.tests.utils import skipIfCustomUser from django.contrib.flatpages.models import FlatPage from django.test import TestCase from django.test.utils import override_settings @override_settings( LOGIN_URL='/accounts/login/', MIDDLEWARE_CLASSES=( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware', ), TEMPLATE_DIRS=( os.path.join(os.path.dirname(__file__), 'templates'), ), SITE_ID=1, ) class FlatpageMiddlewareTests(TestCase): fixtures = ['sample_flatpages', 'example_site'] urls = 'django.contrib.flatpages.tests.urls' def test_view_flatpage(self): "A flatpage can be served through a view, even when the middleware is in use" response = self.client.get('/flatpage_root/flatpage/') self.assertEqual(response.status_code, 200) self.assertContains(response, "<p>Isn't it flat!</p>") def test_view_non_existent_flatpage(self): "A non-existent flatpage raises 404 when served through a view, even when the middleware is in use" response = self.client.get('/flatpage_root/no_such_flatpage/') self.assertEqual(response.status_code, 404) @skipIfCustomUser def test_view_authenticated_flatpage(self): "A flatpage served through a view can require authentication" response = self.client.get('/flatpage_root/sekrit/') self.assertRedirects(response, '/accounts/login/?next=/flatpage_root/sekrit/') User.objects.create_user('testuser', 'test@example.com', 's3krit') self.client.login(username='testuser',password='s3krit') response = self.client.get('/flatpage_root/sekrit/') self.assertEqual(response.status_code, 200) self.assertContains(response, "<p>Isn't it sekrit!</p>") def test_fallback_flatpage(self): "A flatpage can 
be served by the fallback middlware" response = self.client.get('/flatpage/') self.assertEqual(response.status_code, 200) self.assertContains(response, "<p>Isn't it flat!</p>") def test_fallback_non_existent_flatpage(self): "A non-existent flatpage raises a 404 when served by the fallback middlware" response = self.client.get('/no_such_flatpage/') self.assertEqual(response.status_code, 404) @skipIfCustomUser def test_fallback_authenticated_flatpage(self): "A flatpage served by the middleware can require authentication" response = self.client.get('/sekrit/') self.assertRedirects(response, '/accounts/login/?next=/sekrit/') User.objects.create_user('testuser', 'test@example.com', 's3krit') self.client.login(username='testuser',password='s3krit') response = self.client.get('/sekrit/') self.assertEqual(response.status_code, 200) self.assertContains(response, "<p>Isn't it sekrit!</p>") def test_fallback_flatpage_special_chars(self): "A flatpage with special chars in the URL can be served by the fallback middleware" fp = FlatPage.objects.create( url="/some.very_special~chars-here/", title="A very special page", content="Isn't it special!", enable_comments=False, registration_required=False, ) fp.sites.add(settings.SITE_ID) response = self.client.get('/some.very_special~chars-here/') self.assertEqual(response.status_code, 200) self.assertContains(response, "<p>Isn't it special!</p>") @override_settings( APPEND_SLASH = True, LOGIN_URL='/accounts/login/', MIDDLEWARE_CLASSES=( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware', ), TEMPLATE_DIRS=( os.path.join(os.path.dirname(__file__), 'templates'), ), SITE_ID=1, ) class FlatpageMiddlewareAppendSlashTests(TestCase): fixtures = ['sample_flatpages', 
'example_site'] urls = 'django.contrib.flatpages.tests.urls' def test_redirect_view_flatpage(self): "A flatpage can be served through a view and should add a slash" response = self.client.get('/flatpage_root/flatpage') self.assertRedirects(response, '/flatpage_root/flatpage/', status_code=301) def test_redirect_view_non_existent_flatpage(self): "A non-existent flatpage raises 404 when served through a view and should not add a slash" response = self.client.get('/flatpage_root/no_such_flatpage') self.assertEqual(response.status_code, 404) def test_redirect_fallback_flatpage(self): "A flatpage can be served by the fallback middlware and should add a slash" response = self.client.get('/flatpage') self.assertRedirects(response, '/flatpage/', status_code=301) def test_redirect_fallback_non_existent_flatpage(self): "A non-existent flatpage raises a 404 when served by the fallback middlware and should not add a slash" response = self.client.get('/no_such_flatpage') self.assertEqual(response.status_code, 404) def test_redirect_fallback_flatpage_special_chars(self): "A flatpage with special chars in the URL can be served by the fallback middleware and should add a slash" fp = FlatPage.objects.create( url="/some.very_special~chars-here/", title="A very special page", content="Isn't it special!", enable_comments=False, registration_required=False, ) fp.sites.add(settings.SITE_ID) response = self.client.get('/some.very_special~chars-here') self.assertRedirects(response, '/some.very_special~chars-here/', status_code=301) def test_redirect_fallback_flatpage_root(self): "A flatpage at / should not cause a redirect loop when APPEND_SLASH is set" fp = FlatPage.objects.create( url="/", title="Root", content="Root", enable_comments=False, registration_required=False, ) fp.sites.add(settings.SITE_ID) response = self.client.get('/') self.assertEqual(response.status_code, 200) self.assertContains(response, "<p>Root</p>")
mit
alrifqi/django
tests/csrf_tests/tests.py
152
19350
# -*- coding: utf-8 -*- from __future__ import unicode_literals import logging from django.conf import settings from django.http import HttpRequest, HttpResponse from django.middleware.csrf import ( CSRF_KEY_LENGTH, CsrfViewMiddleware, get_token, ) from django.template import RequestContext, Template from django.template.context_processors import csrf from django.test import SimpleTestCase, override_settings from django.views.decorators.csrf import ( csrf_exempt, ensure_csrf_cookie, requires_csrf_token, ) # Response/views used for CsrfResponseMiddleware and CsrfViewMiddleware tests def post_form_response(): resp = HttpResponse(content=""" <html><body><h1>\u00a1Unicode!<form method="post"><input type="text" /></form></body></html> """, mimetype="text/html") return resp def post_form_view(request): """A view that returns a POST form (without a token)""" return post_form_response() # Response/views used for template tag tests def token_view(request): """A view that uses {% csrf_token %}""" context = RequestContext(request, processors=[csrf]) template = Template("{% csrf_token %}") return HttpResponse(template.render(context)) def non_token_view_using_request_processor(request): """ A view that doesn't use the token, but does use the csrf view processor. """ context = RequestContext(request, processors=[csrf]) template = Template("") return HttpResponse(template.render(context)) class TestingHttpRequest(HttpRequest): """ A version of HttpRequest that allows us to change some things more easily """ def is_secure(self): return getattr(self, '_is_secure_override', False) class CsrfViewMiddlewareTest(SimpleTestCase): # The csrf token is potentially from an untrusted source, so could have # characters that need dealing with. 
_csrf_id_cookie = b"<1>\xc2\xa1" _csrf_id = "1" def _get_GET_no_csrf_cookie_request(self): return TestingHttpRequest() def _get_GET_csrf_cookie_request(self): req = TestingHttpRequest() req.COOKIES[settings.CSRF_COOKIE_NAME] = self._csrf_id_cookie return req def _get_POST_csrf_cookie_request(self): req = self._get_GET_csrf_cookie_request() req.method = "POST" return req def _get_POST_no_csrf_cookie_request(self): req = self._get_GET_no_csrf_cookie_request() req.method = "POST" return req def _get_POST_request_with_token(self): req = self._get_POST_csrf_cookie_request() req.POST['csrfmiddlewaretoken'] = self._csrf_id return req def _check_token_present(self, response, csrf_id=None): self.assertContains(response, "name='csrfmiddlewaretoken' value='%s'" % (csrf_id or self._csrf_id)) def test_process_view_token_too_long(self): """ Check that if the token is longer than expected, it is ignored and a new token is created. """ req = self._get_GET_no_csrf_cookie_request() req.COOKIES[settings.CSRF_COOKIE_NAME] = 'x' * 10000000 CsrfViewMiddleware().process_view(req, token_view, (), {}) resp = token_view(req) resp2 = CsrfViewMiddleware().process_response(req, resp) csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, False) self.assertEqual(len(csrf_cookie.value), CSRF_KEY_LENGTH) def test_process_response_get_token_used(self): """ When get_token is used, check that the cookie is created and headers patched. 
""" req = self._get_GET_no_csrf_cookie_request() # Put tests for CSRF_COOKIE_* settings here with self.settings(CSRF_COOKIE_NAME='myname', CSRF_COOKIE_DOMAIN='.example.com', CSRF_COOKIE_PATH='/test/', CSRF_COOKIE_SECURE=True, CSRF_COOKIE_HTTPONLY=True): # token_view calls get_token() indirectly CsrfViewMiddleware().process_view(req, token_view, (), {}) resp = token_view(req) resp2 = CsrfViewMiddleware().process_response(req, resp) csrf_cookie = resp2.cookies.get('myname', False) self.assertNotEqual(csrf_cookie, False) self.assertEqual(csrf_cookie['domain'], '.example.com') self.assertEqual(csrf_cookie['secure'], True) self.assertEqual(csrf_cookie['httponly'], True) self.assertEqual(csrf_cookie['path'], '/test/') self.assertIn('Cookie', resp2.get('Vary', '')) def test_process_response_get_token_not_used(self): """ Check that if get_token() is not called, the view middleware does not add a cookie. """ # This is important to make pages cacheable. Pages which do call # get_token(), assuming they use the token, are not cacheable because # the token is specific to the user req = self._get_GET_no_csrf_cookie_request() # non_token_view_using_request_processor does not call get_token(), but # does use the csrf request processor. By using this, we are testing # that the view processor is properly lazy and doesn't call get_token() # until needed. CsrfViewMiddleware().process_view(req, non_token_view_using_request_processor, (), {}) resp = non_token_view_using_request_processor(req) resp2 = CsrfViewMiddleware().process_response(req, resp) csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, False) self.assertEqual(csrf_cookie, False) # Check the request processing def test_process_request_no_csrf_cookie(self): """ Check that if no CSRF cookies is present, the middleware rejects the incoming request. This will stop login CSRF. 
""" req = self._get_POST_no_csrf_cookie_request() req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertEqual(403, req2.status_code) def test_process_request_csrf_cookie_no_token(self): """ Check that if a CSRF cookie is present but no token, the middleware rejects the incoming request. """ req = self._get_POST_csrf_cookie_request() req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertEqual(403, req2.status_code) def test_process_request_csrf_cookie_and_token(self): """ Check that if both a cookie and a token is present, the middleware lets it through. """ req = self._get_POST_request_with_token() req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(req2) def test_process_request_csrf_cookie_no_token_exempt_view(self): """ Check that if a CSRF cookie is present and no token, but the csrf_exempt decorator has been applied to the view, the middleware lets it through """ req = self._get_POST_csrf_cookie_request() req2 = CsrfViewMiddleware().process_view(req, csrf_exempt(post_form_view), (), {}) self.assertIsNone(req2) def test_csrf_token_in_header(self): """ Check that we can pass in the token in a header instead of in the form """ req = self._get_POST_csrf_cookie_request() req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(req2) @override_settings(CSRF_HEADER_NAME='HTTP_X_CSRFTOKEN_CUSTOMIZED') def test_csrf_token_in_header_with_customized_name(self): """ settings.CSRF_HEADER_NAME can be used to customize the CSRF header name """ req = self._get_POST_csrf_cookie_request() req.META['HTTP_X_CSRFTOKEN_CUSTOMIZED'] = self._csrf_id req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(req2) def test_put_and_delete_rejected(self): """ Tests that HTTP PUT and DELETE methods have protection """ req = TestingHttpRequest() req.method = 'PUT' req2 = 
CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertEqual(403, req2.status_code) req = TestingHttpRequest() req.method = 'DELETE' req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertEqual(403, req2.status_code) def test_put_and_delete_allowed(self): """ Tests that HTTP PUT and DELETE methods can get through with X-CSRFToken and a cookie """ req = self._get_GET_csrf_cookie_request() req.method = 'PUT' req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(req2) req = self._get_GET_csrf_cookie_request() req.method = 'DELETE' req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(req2) # Tests for the template tag method def test_token_node_no_csrf_cookie(self): """ Check that CsrfTokenNode works when no CSRF cookie is set """ req = self._get_GET_no_csrf_cookie_request() resp = token_view(req) token = get_token(req) self.assertIsNotNone(token) self._check_token_present(resp, token) def test_token_node_empty_csrf_cookie(self): """ Check that we get a new token if the csrf_cookie is the empty string """ req = self._get_GET_no_csrf_cookie_request() req.COOKIES[settings.CSRF_COOKIE_NAME] = b"" CsrfViewMiddleware().process_view(req, token_view, (), {}) resp = token_view(req) token = get_token(req) self.assertIsNotNone(token) self._check_token_present(resp, token) def test_token_node_with_csrf_cookie(self): """ Check that CsrfTokenNode works when a CSRF cookie is set """ req = self._get_GET_csrf_cookie_request() CsrfViewMiddleware().process_view(req, token_view, (), {}) resp = token_view(req) self._check_token_present(resp) def test_get_token_for_exempt_view(self): """ Check that get_token still works for a view decorated with 'csrf_exempt'. 
""" req = self._get_GET_csrf_cookie_request() CsrfViewMiddleware().process_view(req, csrf_exempt(token_view), (), {}) resp = token_view(req) self._check_token_present(resp) def test_get_token_for_requires_csrf_token_view(self): """ Check that get_token works for a view decorated solely with requires_csrf_token """ req = self._get_GET_csrf_cookie_request() resp = requires_csrf_token(token_view)(req) self._check_token_present(resp) def test_token_node_with_new_csrf_cookie(self): """ Check that CsrfTokenNode works when a CSRF cookie is created by the middleware (when one was not already present) """ req = self._get_GET_no_csrf_cookie_request() CsrfViewMiddleware().process_view(req, token_view, (), {}) resp = token_view(req) resp2 = CsrfViewMiddleware().process_response(req, resp) csrf_cookie = resp2.cookies[settings.CSRF_COOKIE_NAME] self._check_token_present(resp, csrf_id=csrf_cookie.value) @override_settings(ALLOWED_HOSTS=['www.example.com']) def test_https_bad_referer(self): """ Test that a POST HTTPS request with a bad referer is rejected """ req = self._get_POST_request_with_token() req._is_secure_override = True req.META['HTTP_HOST'] = 'www.example.com' req.META['HTTP_REFERER'] = 'https://www.evil.org/somepage' req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNotNone(req2) self.assertEqual(403, req2.status_code) @override_settings(ALLOWED_HOSTS=['www.example.com']) def test_https_malformed_referer(self): """ Test that a POST HTTPS request with a bad referer is rejected """ req = self._get_POST_request_with_token() req._is_secure_override = True req.META['HTTP_HOST'] = 'www.example.com' req.META['HTTP_REFERER'] = 'http://http://www.example.com/' req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNotNone(req2) self.assertEqual(403, req2.status_code) # Non-ASCII req.META['HTTP_REFERER'] = b'\xd8B\xf6I\xdf' req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) 
self.assertIsNotNone(req2) self.assertEqual(403, req2.status_code) @override_settings(ALLOWED_HOSTS=['www.example.com']) def test_https_good_referer(self): """ Test that a POST HTTPS request with a good referer is accepted """ req = self._get_POST_request_with_token() req._is_secure_override = True req.META['HTTP_HOST'] = 'www.example.com' req.META['HTTP_REFERER'] = 'https://www.example.com/somepage' req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(req2) @override_settings(ALLOWED_HOSTS=['www.example.com']) def test_https_good_referer_2(self): """ Test that a POST HTTPS request with a good referer is accepted where the referer contains no trailing slash """ # See ticket #15617 req = self._get_POST_request_with_token() req._is_secure_override = True req.META['HTTP_HOST'] = 'www.example.com' req.META['HTTP_REFERER'] = 'https://www.example.com' req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(req2) def test_ensures_csrf_cookie_no_middleware(self): """ Tests that ensures_csrf_cookie decorator fulfils its promise with no middleware """ @ensure_csrf_cookie def view(request): # Doesn't insert a token or anything return HttpResponse(content="") req = self._get_GET_no_csrf_cookie_request() resp = view(req) self.assertTrue(resp.cookies.get(settings.CSRF_COOKIE_NAME, False)) self.assertIn('Cookie', resp.get('Vary', '')) def test_ensures_csrf_cookie_with_middleware(self): """ Tests that ensures_csrf_cookie decorator fulfils its promise with the middleware enabled. 
""" @ensure_csrf_cookie def view(request): # Doesn't insert a token or anything return HttpResponse(content="") req = self._get_GET_no_csrf_cookie_request() CsrfViewMiddleware().process_view(req, view, (), {}) resp = view(req) resp2 = CsrfViewMiddleware().process_response(req, resp) self.assertTrue(resp2.cookies.get(settings.CSRF_COOKIE_NAME, False)) self.assertIn('Cookie', resp2.get('Vary', '')) def test_ensures_csrf_cookie_no_logging(self): """ Tests that ensure_csrf_cookie doesn't log warnings. See #19436. """ @ensure_csrf_cookie def view(request): # Doesn't insert a token or anything return HttpResponse(content="") class TestHandler(logging.Handler): def emit(self, record): raise Exception("This shouldn't have happened!") logger = logging.getLogger('django.request') test_handler = TestHandler() old_log_level = logger.level try: logger.addHandler(test_handler) logger.setLevel(logging.WARNING) req = self._get_GET_no_csrf_cookie_request() view(req) finally: logger.removeHandler(test_handler) logger.setLevel(old_log_level) def test_csrf_cookie_age(self): """ Test to verify CSRF cookie age can be set using settings.CSRF_COOKIE_AGE. """ req = self._get_GET_no_csrf_cookie_request() MAX_AGE = 123 with self.settings(CSRF_COOKIE_NAME='csrfcookie', CSRF_COOKIE_DOMAIN='.example.com', CSRF_COOKIE_AGE=MAX_AGE, CSRF_COOKIE_PATH='/test/', CSRF_COOKIE_SECURE=True, CSRF_COOKIE_HTTPONLY=True): # token_view calls get_token() indirectly CsrfViewMiddleware().process_view(req, token_view, (), {}) resp = token_view(req) resp2 = CsrfViewMiddleware().process_response(req, resp) max_age = resp2.cookies.get('csrfcookie').get('max-age') self.assertEqual(max_age, MAX_AGE) def test_csrf_cookie_age_none(self): """ Test to verify CSRF cookie age does not have max age set and therefore uses session-based cookies. 
""" req = self._get_GET_no_csrf_cookie_request() MAX_AGE = None with self.settings(CSRF_COOKIE_NAME='csrfcookie', CSRF_COOKIE_DOMAIN='.example.com', CSRF_COOKIE_AGE=MAX_AGE, CSRF_COOKIE_PATH='/test/', CSRF_COOKIE_SECURE=True, CSRF_COOKIE_HTTPONLY=True): # token_view calls get_token() indirectly CsrfViewMiddleware().process_view(req, token_view, (), {}) resp = token_view(req) resp2 = CsrfViewMiddleware().process_response(req, resp) max_age = resp2.cookies.get('csrfcookie').get('max-age') self.assertEqual(max_age, '') def test_post_data_read_failure(self): """ #20128 -- IOErrors during POST data reading should be caught and treated as if the POST data wasn't there. """ class CsrfPostRequest(HttpRequest): """ HttpRequest that can raise an IOError when accessing POST data """ def __init__(self, token, raise_error): super(CsrfPostRequest, self).__init__() self.method = 'POST' self.raise_error = False self.COOKIES[settings.CSRF_COOKIE_NAME] = token self.POST['csrfmiddlewaretoken'] = token self.raise_error = raise_error def _load_post_and_files(self): raise IOError('error reading input data') def _get_post(self): if self.raise_error: self._load_post_and_files() return self._post def _set_post(self, post): self._post = post POST = property(_get_post, _set_post) token = 'ABC' req = CsrfPostRequest(token, raise_error=False) resp = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(resp) req = CsrfPostRequest(token, raise_error=True) resp = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertEqual(resp.status_code, 403)
bsd-3-clause
nickmoline/feedsanitizer
djangoappengine/tests/filter.py
5
20149
from ..db.utils import get_cursor, set_cursor from .testmodels import FieldsWithOptionsModel, EmailModel, DateTimeModel, \ OrderedModel, BlobModel from django.db.models import Q from django.db.utils import DatabaseError from django.test import TestCase from django.utils import unittest from google.appengine.api.datastore import Get, Key import datetime import time class FilterTest(TestCase): floats = [5.3, 2.6, 9.1, 1.58] emails = ['app-engine@scholardocs.com', 'sharingan@uchias.com', 'rinnengan@sage.de', 'rasengan@naruto.com'] datetimes = [datetime.datetime(2010, 1, 1, 0, 0, 0, 0), datetime.datetime(2010, 12, 31, 23, 59, 59, 999999), datetime.datetime(2011, 1, 1, 0, 0, 0, 0), datetime.datetime(2013, 7, 28, 22, 30, 20, 50)] def setUp(self): for index, (float, email, datetime_value) in enumerate(zip(FilterTest.floats, FilterTest.emails, FilterTest.datetimes)): # ensure distinct times when saving entities time.sleep(0.01) self.last_save_time = datetime.datetime.now().time() ordered_instance = OrderedModel(priority=index, pk=index + 1) ordered_instance.save() FieldsWithOptionsModel(floating_point=float, integer=int(float), email=email, time=self.last_save_time, foreign_key=ordered_instance).save() EmailModel(email=email).save() DateTimeModel(datetime=datetime_value).save() def test_startswith(self): self.assertEquals([entity.email for entity in FieldsWithOptionsModel.objects.filter( email__startswith='r').order_by('email')], ['rasengan@naruto.com', 'rinnengan@sage.de']) self.assertEquals([entity.email for entity in EmailModel.objects.filter( email__startswith='r').order_by('email')], ['rasengan@naruto.com', 'rinnengan@sage.de']) def test_gt(self): # test gt on float self.assertEquals([entity.floating_point for entity in FieldsWithOptionsModel.objects.filter( floating_point__gt=3.1).order_by('floating_point')], [5.3, 9.1]) # test gt on integer self.assertEquals([entity.integer for entity in FieldsWithOptionsModel.objects.filter( integer__gt=3).order_by('integer')], [5, 
9]) # test filter on primary_key field self.assertEquals([entity.email for entity in FieldsWithOptionsModel.objects.filter(email__gt='as'). order_by('email')], ['rasengan@naruto.com', 'rinnengan@sage.de', 'sharingan@uchias.com', ]) # test ForeignKeys with id self.assertEquals(sorted([entity.email for entity in FieldsWithOptionsModel.objects.filter( foreign_key__gt=2)]), ['rasengan@naruto.com', 'rinnengan@sage.de', ]) # and with instance ordered_instance = OrderedModel.objects.get(priority=1) self.assertEquals(sorted([entity.email for entity in FieldsWithOptionsModel.objects.filter( foreign_key__gt=ordered_instance)]), ['rasengan@naruto.com', 'rinnengan@sage.de', ]) def test_lt(self): # test lt on float self.assertEquals([entity.floating_point for entity in FieldsWithOptionsModel.objects.filter( floating_point__lt=3.1).order_by('floating_point')], [1.58, 2.6]) # test lt on integer self.assertEquals([entity.integer for entity in FieldsWithOptionsModel.objects.filter( integer__lt=3).order_by('integer')], [1, 2]) # test filter on primary_key field self.assertEquals([entity.email for entity in FieldsWithOptionsModel.objects.filter(email__lt='as'). 
order_by('email')], ['app-engine@scholardocs.com', ]) # filter on datetime self.assertEquals([entity.email for entity in FieldsWithOptionsModel.objects.filter( time__lt=self.last_save_time).order_by('time')], ['app-engine@scholardocs.com', 'sharingan@uchias.com', 'rinnengan@sage.de',]) # test ForeignKeys with id self.assertEquals(sorted([entity.email for entity in FieldsWithOptionsModel.objects.filter( foreign_key__lt=3)]), ['app-engine@scholardocs.com', 'sharingan@uchias.com']) # and with instance ordered_instance = OrderedModel.objects.get(priority=2) self.assertEquals(sorted([entity.email for entity in FieldsWithOptionsModel.objects.filter( foreign_key__lt=ordered_instance)]), ['app-engine@scholardocs.com', 'sharingan@uchias.com']) def test_gte(self): # test gte on float self.assertEquals([entity.floating_point for entity in FieldsWithOptionsModel.objects.filter( floating_point__gte=2.6).order_by('floating_point')], [2.6, 5.3, 9.1]) # test gte on integer self.assertEquals([entity.integer for entity in FieldsWithOptionsModel.objects.filter( integer__gte=2).order_by('integer')], [2, 5, 9]) # test filter on primary_key field self.assertEquals([entity.email for entity in FieldsWithOptionsModel.objects.filter( email__gte='rinnengan@sage.de').order_by('email')], ['rinnengan@sage.de', 'sharingan@uchias.com', ]) def test_lte(self): # test lte on float self.assertEquals([entity.floating_point for entity in FieldsWithOptionsModel.objects.filter( floating_point__lte=5.3).order_by('floating_point')], [1.58, 2.6, 5.3]) # test lte on integer self.assertEquals([entity.integer for entity in FieldsWithOptionsModel.objects.filter( integer__lte=5).order_by('integer')], [1, 2, 5]) # test filter on primary_key field self.assertEquals([entity.email for entity in FieldsWithOptionsModel.objects.filter( email__lte='rinnengan@sage.de').order_by('email')], ['app-engine@scholardocs.com', 'rasengan@naruto.com', 'rinnengan@sage.de']) def test_equals(self): # test equality filter on 
primary_key field self.assertEquals([entity.email for entity in FieldsWithOptionsModel.objects.filter( email='rinnengan@sage.de').order_by('email')], ['rinnengan@sage.de']) def test_is_null(self): self.assertEquals(FieldsWithOptionsModel.objects.filter( floating_point__isnull=True).count(), 0) FieldsWithOptionsModel(integer=5.4, email='shinra.tensai@sixpaths.com', time=datetime.datetime.now().time()).save() self.assertEquals(FieldsWithOptionsModel.objects.filter( floating_point__isnull=True).count(), 1) # XXX: These filters will not work because of a Django bug # self.assertEquals(FieldsWithOptionsModel.objects.filter( # foreign_key=None).count(), 1) # (it uses left outer joins if checked against isnull # self.assertEquals(FieldsWithOptionsModel.objects.filter( # foreign_key__isnull=True).count(), 1) def test_exclude(self): self.assertEquals([entity.email for entity in FieldsWithOptionsModel.objects.all().exclude( floating_point__lt=9.1).order_by('floating_point')], ['rinnengan@sage.de', ]) # test exclude with foreignKey ordered_instance = OrderedModel.objects.get(priority=1) self.assertEquals(sorted([entity.email for entity in FieldsWithOptionsModel.objects.all().exclude( foreign_key__gt=ordered_instance)]), ['app-engine@scholardocs.com', 'sharingan@uchias.com',]) def test_exclude_pk(self): self.assertEquals([entity.pk for entity in OrderedModel.objects.exclude(pk__in=[2, 3]) .order_by('pk')], [1, 4]) def test_chained_filter(self): # additionally tests count :) self.assertEquals(FieldsWithOptionsModel.objects.filter( floating_point__lt=5.3, floating_point__gt=2.6). count(), 0) # test across multiple columns. 
On app engine only one filter is allowed # to be an inequality filter self.assertEquals([(entity.floating_point, entity.integer) for entity in FieldsWithOptionsModel.objects.filter( floating_point__lte=5.3, integer=2).order_by( 'floating_point')], [(2.6, 2), ]) # test multiple filters including the primary_key field self.assertEquals([entity.email for entity in FieldsWithOptionsModel.objects.filter( email__gte='rinnengan@sage.de', integer=2).order_by( 'email')], ['sharingan@uchias.com', ]) # test in filter on primary key with another arbitrary filter self.assertEquals([entity.email for entity in FieldsWithOptionsModel.objects.filter( email__in=['rinnengan@sage.de', 'sharingan@uchias.com'], integer__gt=2).order_by( 'integer')], ['rinnengan@sage.de', ]) # Test exceptions # test multiple filters exception when filtered and not ordered against # the first filter self.assertRaises(DatabaseError, lambda: FieldsWithOptionsModel.objects.filter( email__gte='rinnengan@sage.de', floating_point=5.3).order_by( 'floating_point')[0]) # test exception if filtered across multiple columns with inequality filter self.assertRaises(DatabaseError, FieldsWithOptionsModel.objects.filter( floating_point__lte=5.3, integer__gte=2).order_by( 'floating_point').get) # test exception if filtered across multiple columns with inequality filter # with exclude self.assertRaises(DatabaseError, FieldsWithOptionsModel.objects.filter( email__lte='rinnengan@sage.de').exclude( floating_point__lt=9.1).order_by('email').get) self.assertRaises(DatabaseError, lambda: FieldsWithOptionsModel.objects.all().exclude( floating_point__lt=9.1).order_by('email')[0]) # TODO: Maybe check all possible exceptions def test_slicing(self): # test slicing on filter with primary_key self.assertEquals([entity.email for entity in FieldsWithOptionsModel.objects.filter( email__lte='rinnengan@sage.de').order_by('email')[:2]], ['app-engine@scholardocs.com', 'rasengan@naruto.com', ]) self.assertEquals([entity.email for entity in 
FieldsWithOptionsModel.objects.filter( email__lte='rinnengan@sage.de').order_by('email')[1:2]], ['rasengan@naruto.com', ]) # test on non pk field self.assertEquals([entity.integer for entity in FieldsWithOptionsModel.objects.all().order_by( 'integer')[:2]], [1, 2, ]) self.assertEquals([entity.email for entity in FieldsWithOptionsModel.objects.all().order_by( 'email')[::2]], ['app-engine@scholardocs.com', 'rinnengan@sage.de',]) def test_cursor(self): results = list(FieldsWithOptionsModel.objects.all()) cursor = None for item in results: query = FieldsWithOptionsModel.objects.all()[:1] if cursor is not None: query = set_cursor(query, cursor) next = query[0] self.assertEqual(next.pk, item.pk) cursor = get_cursor(query) query = set_cursor(FieldsWithOptionsModel.objects.all(), cursor) self.assertEqual(list(query[:1]), []) def test_Q_objects(self): self.assertEquals([entity.email for entity in FieldsWithOptionsModel.objects.filter( Q(email__lte='rinnengan@sage.de')).order_by('email')][:2], ['app-engine@scholardocs.com', 'rasengan@naruto.com', ]) self.assertEquals([entity.integer for entity in FieldsWithOptionsModel.objects.exclude(Q(integer__lt=5) | Q(integer__gte=9)).order_by('integer')], [5, ]) self.assertRaises(TypeError, FieldsWithOptionsModel.objects.filter( Q(floating_point=9.1), Q(integer=9) | Q(integer=2))) def test_pk_in(self): # test pk__in with field name email self.assertEquals([entity.email for entity in FieldsWithOptionsModel.objects.filter( email__in=['app-engine@scholardocs.com', 'rasengan@naruto.com'])], ['app-engine@scholardocs.com', 'rasengan@naruto.com']) def test_in(self): self.assertEquals([entity.email for entity in FieldsWithOptionsModel.objects.filter( floating_point__in=[5.3, 2.6, 1.58]).filter( integer__in=[1, 5, 9])], ['app-engine@scholardocs.com', 'rasengan@naruto.com']) def test_in_with_pk_in(self): self.assertEquals([entity.email for entity in FieldsWithOptionsModel.objects.filter( floating_point__in=[5.3, 2.6, 1.58]).filter( 
email__in=['app-engine@scholardocs.com', 'rasengan@naruto.com'])], ['app-engine@scholardocs.com', 'rasengan@naruto.com']) def test_inequality(self): self.assertEquals([entity.email for entity in FieldsWithOptionsModel.objects.exclude( floating_point=5.3).filter( integer__in=[1, 5, 9])], ['rasengan@naruto.com', 'rinnengan@sage.de']) def test_values(self): # test values() self.assertEquals([entity['pk'] for entity in FieldsWithOptionsModel.objects.filter(integer__gt=3). order_by('integer').values('pk')], ['app-engine@scholardocs.com', 'rinnengan@sage.de']) self.assertEquals(FieldsWithOptionsModel.objects.filter(integer__gt=3). order_by('integer').values('pk').count(), 2) # these queries first fetch the whole entity and then only return the # desired fields selected in .values self.assertEquals([entity['integer'] for entity in FieldsWithOptionsModel.objects.filter( email__startswith='r').order_by('email').values( 'integer')], [1, 9]) self.assertEquals([entity['floating_point'] for entity in FieldsWithOptionsModel.objects.filter(integer__gt=3). order_by('integer').values('floating_point')], [5.3, 9.1]) # test values_list self.assertEquals([entity[0] for entity in FieldsWithOptionsModel.objects.filter(integer__gt=3). order_by('integer').values_list('pk')], ['app-engine@scholardocs.com', 'rinnengan@sage.de']) def test_range(self): # test range on float self.assertEquals([entity.floating_point for entity in FieldsWithOptionsModel.objects.filter( floating_point__range=(2.6, 9.1)). order_by('floating_point')], [2.6, 5.3, 9.1,]) # test range on pk self.assertEquals([entity.pk for entity in FieldsWithOptionsModel.objects.filter( pk__range=('app-engine@scholardocs.com', 'rinnengan@sage.de')). 
order_by('pk')], ['app-engine@scholardocs.com', 'rasengan@naruto.com', 'rinnengan@sage.de',]) # test range on date/datetime objects start_time = datetime.time(self.last_save_time.hour, self.last_save_time.minute - 1, self.last_save_time.second, self.last_save_time.microsecond) self.assertEquals([entity.email for entity in FieldsWithOptionsModel.objects.filter( time__range=(start_time, self.last_save_time)).order_by('time')], ['app-engine@scholardocs.com', 'sharingan@uchias.com', 'rinnengan@sage.de', 'rasengan@naruto.com',]) def test_date(self): # test year on date range boundaries self.assertEquals([entity.datetime for entity in DateTimeModel.objects.filter( datetime__year=2010).order_by('datetime')], [datetime.datetime(2010, 1, 1, 0, 0, 0, 0), datetime.datetime(2010, 12, 31, 23, 59, 59, 999999),]) # test year on non boundary date self.assertEquals([entity.datetime for entity in DateTimeModel.objects.filter( datetime__year=2013).order_by('datetime')], [datetime.datetime(2013, 7, 28, 22, 30, 20, 50),]) def test_auto_now(self): time.sleep(0.1) entity = DateTimeModel.objects.all()[0] auto_now = entity.datetime_auto_now entity.save() entity = DateTimeModel.objects.get(pk=entity.pk) self.assertNotEqual(auto_now, entity.datetime_auto_now) def test_auto_now_add(self): time.sleep(0.1) entity = DateTimeModel.objects.all()[0] auto_now_add = entity.datetime_auto_now_add entity.save() entity = DateTimeModel.objects.get(pk=entity.pk) self.assertEqual(auto_now_add, entity.datetime_auto_now_add) def test_latest(self): self.assertEquals(FieldsWithOptionsModel.objects.latest('time').floating_point, 1.58) def test_blob(self): x = BlobModel(data='lalala') x.full_clean() x.save() e = Get(Key.from_path(BlobModel._meta.db_table, x.pk)) self.assertEqual(e['data'], x.data) x = BlobModel.objects.all()[0] self.assertEqual(e['data'], x.data)
mit
Hybrid-Cloud/badam
patches_tool/aws_patch/aws_deps/libcloud/compute/drivers/vultr.py
1
6023
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Vultr Driver
"""
import time

from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlencode

from libcloud.common.base import ConnectionKey, JsonResponse
from libcloud.compute.types import Provider, NodeState
from libcloud.common.types import LibcloudError, InvalidCredsError
from libcloud.compute.base import NodeDriver
from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation


class VultrResponse(JsonResponse):
    def parse_error(self):
        """Translate error HTTP statuses into libcloud exceptions.

        :return: the parsed body when the status is actually 200 OK.
        :raises InvalidCredsError: on 403 (bad or missing API key).
        :raises LibcloudError: on any other error status.
        """
        if self.status == httplib.OK:
            body = self.parse_body()
            return body
        elif self.status == httplib.FORBIDDEN:
            raise InvalidCredsError(self.body)
        else:
            raise LibcloudError(self.body)


class VultrConnection(ConnectionKey):
    """
    Connection class for the Vultr driver.
    """

    host = 'api.vultr.com'
    responseCls = VultrResponse

    def add_default_params(self, params):
        """
        Add parameters that are necessary for every request.

        This method adds ``api_key`` (the connection's key) to the request.
        """
        params['api_key'] = self.key
        return params

    def encode_data(self, data):
        return urlencode(data)

    def get(self, url):
        return self.request(url)

    def post(self, url, data):
        # The Vultr v1 API expects form-encoded POST bodies.
        headers = {'Content-Type': 'application/x-www-form-urlencoded'}
        return self.request(url, data=data, headers=headers, method='POST')


class VultrNodeDriver(NodeDriver):
    """
    VultrNode node driver.
    """

    connectionCls = VultrConnection

    type = Provider.VULTR
    name = 'Vultr'
    website = 'https://www.vultr.com'

    NODE_STATE_MAP = {'pending': NodeState.PENDING,
                      'active': NodeState.RUNNING}

    def list_nodes(self):
        return self._list_resources('/v1/server/list', self._to_node)

    def list_locations(self):
        return self._list_resources('/v1/regions/list', self._to_location)

    def list_sizes(self):
        return self._list_resources('/v1/plans/list', self._to_size)

    def list_images(self):
        return self._list_resources('/v1/os/list', self._to_image)

    def create_node(self, name, size, image, location):
        """Create a node (a "server", in Vultr terms).

        :return: the created :class:`Node`, ``None`` if the node was created
            but did not show up in ``list_nodes`` within the retry window, or
            ``False`` if the create call itself failed (kept for backward
            compatibility with existing callers).
        """
        params = {'DCID': location.id, 'VPSPLANID': size.id,
                  'OSID': image.id, 'label': name}

        result = self.connection.post('/v1/server/create', params)
        if result.status != httplib.OK:
            return False

        subid = result.object['SUBID']

        retry_count = 3
        created_node = None

        # The new server may take a moment to appear in the listing, so
        # poll a few times before giving up.
        for i in range(retry_count):
            try:
                nodes = self.list_nodes()
                created_node = [n for n in nodes if n.id == subid][0]
            except IndexError:
                time.sleep(1)
            else:
                break

        return created_node

    def reboot_node(self, node):
        params = {'SUBID': node.id}
        res = self.connection.post('/v1/server/reboot', params)

        return res.status == httplib.OK

    def destroy_node(self, node):
        params = {'SUBID': node.id}
        res = self.connection.post('/v1/server/destroy', params)

        return res.status == httplib.OK

    def _list_resources(self, url, transform_func):
        # The API returns a JSON object keyed by resource id; sort the keys
        # so the returned list has a stable order.
        data = self.connection.get(url).object
        sorted_key = sorted(data)
        return [transform_func(data[key]) for key in sorted_key]

    def _to_node(self, data):
        if 'status' in data:
            state = self.NODE_STATE_MAP.get(data['status'], NodeState.UNKNOWN)
            if state == NodeState.RUNNING and \
               data['power_status'] != 'running':
                state = NodeState.STOPPED
        else:
            state = NodeState.UNKNOWN

        if 'main_ip' in data and data['main_ip'] is not None:
            public_ips = [data['main_ip']]
        else:
            public_ips = []

        # Placeholder for response attributes that should be surfaced via
        # Node.extra; currently none are copied over.
        extra_keys = []
        extra = {}
        for key in extra_keys:
            if key in data:
                extra[key] = data[key]

        # BUG FIX: private_ips was previously None; libcloud's Node API
        # expects a list here, and callers iterating node.private_ips
        # would crash on None.
        node = Node(id=data['SUBID'], name=data['label'], state=state,
                    public_ips=public_ips, private_ips=[], extra=extra,
                    driver=self)

        return node

    def _to_location(self, data):
        return NodeLocation(id=data['DCID'], name=data['name'],
                            country=data['country'], driver=self)

    def _to_size(self, data):
        extra = {'vcpu_count': int(data['vcpu_count'])}
        ram = int(data['ram'])
        disk = int(data['disk'])
        bandwidth = float(data['bandwidth'])
        price = float(data['price_per_month'])

        return NodeSize(id=data['VPSPLANID'], name=data['name'],
                        ram=ram, disk=disk,
                        bandwidth=bandwidth, price=price,
                        extra=extra, driver=self)

    def _to_image(self, data):
        extra = {'arch': data['arch'], 'family': data['family']}
        return NodeImage(id=data['OSID'], name=data['name'],
                         extra=extra, driver=self)
apache-2.0
gabbayo/git-repo
subcmds/diffmanifests.py
3
7574
# -*- coding:utf-8 -*- # # Copyright (C) 2014 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from color import Coloring from command import PagedCommand from manifest_xml import XmlManifest class _Coloring(Coloring): def __init__(self, config): Coloring.__init__(self, config, "status") class Diffmanifests(PagedCommand): """ A command to see logs in projects represented by manifests This is used to see deeper differences between manifests. Where a simple diff would only show a diff of sha1s for example, this command will display the logs of the project between both sha1s, allowing user to see diff at a deeper level. """ common = True helpSummary = "Manifest diff utility" helpUsage = """%prog manifest1.xml [manifest2.xml] [options]""" helpDescription = """ The %prog command shows differences between project revisions of manifest1 and manifest2. if manifest2 is not specified, current manifest.xml will be used instead. Both absolute and relative paths may be used for manifests. Relative paths start from project's ".repo/manifests" folder. 
The --raw option Displays the diff in a way that facilitates parsing, the project pattern will be <status> <path> <revision from> [<revision to>] and the commit pattern will be <status> <onelined log> with status values respectively : A = Added project R = Removed project C = Changed project U = Project with unreachable revision(s) (revision(s) not found) for project, and A = Added commit R = Removed commit for a commit. Only changed projects may contain commits, and commit status always starts with a space, and are part of last printed project. Unreachable revisions may occur if project is not up to date or if repo has not been initialized with all the groups, in which case some projects won't be synced and their revisions won't be found. """ def _Options(self, p): p.add_option('--raw', dest='raw', action='store_true', help='Display raw diff.') p.add_option('--no-color', dest='color', action='store_false', default=True, help='does not display the diff in color.') p.add_option('--pretty-format', dest='pretty_format', action='store', metavar='<FORMAT>', help='print the log using a custom git pretty format string') def _printRawDiff(self, diff): for project in diff['added']: self.printText("A %s %s" % (project.relpath, project.revisionExpr)) self.out.nl() for project in diff['removed']: self.printText("R %s %s" % (project.relpath, project.revisionExpr)) self.out.nl() for project, otherProject in diff['changed']: self.printText("C %s %s %s" % (project.relpath, project.revisionExpr, otherProject.revisionExpr)) self.out.nl() self._printLogs(project, otherProject, raw=True, color=False) for project, otherProject in diff['unreachable']: self.printText("U %s %s %s" % (project.relpath, project.revisionExpr, otherProject.revisionExpr)) self.out.nl() def _printDiff(self, diff, color=True, pretty_format=None): if diff['added']: self.out.nl() self.printText('added projects : \n') self.out.nl() for project in diff['added']: self.printProject('\t%s' % (project.relpath)) 
self.printText(' at revision ') self.printRevision(project.revisionExpr) self.out.nl() if diff['removed']: self.out.nl() self.printText('removed projects : \n') self.out.nl() for project in diff['removed']: self.printProject('\t%s' % (project.relpath)) self.printText(' at revision ') self.printRevision(project.revisionExpr) self.out.nl() if diff['changed']: self.out.nl() self.printText('changed projects : \n') self.out.nl() for project, otherProject in diff['changed']: self.printProject('\t%s' % (project.relpath)) self.printText(' changed from ') self.printRevision(project.revisionExpr) self.printText(' to ') self.printRevision(otherProject.revisionExpr) self.out.nl() self._printLogs(project, otherProject, raw=False, color=color, pretty_format=pretty_format) self.out.nl() if diff['unreachable']: self.out.nl() self.printText('projects with unreachable revisions : \n') self.out.nl() for project, otherProject in diff['unreachable']: self.printProject('\t%s ' % (project.relpath)) self.printRevision(project.revisionExpr) self.printText(' or ') self.printRevision(otherProject.revisionExpr) self.printText(' not found') self.out.nl() def _printLogs(self, project, otherProject, raw=False, color=True, pretty_format=None): logs = project.getAddedAndRemovedLogs(otherProject, oneline=(pretty_format is None), color=color, pretty_format=pretty_format) if logs['removed']: removedLogs = logs['removed'].split('\n') for log in removedLogs: if log.strip(): if raw: self.printText(' R ' + log) self.out.nl() else: self.printRemoved('\t\t[-] ') self.printText(log) self.out.nl() if logs['added']: addedLogs = logs['added'].split('\n') for log in addedLogs: if log.strip(): if raw: self.printText(' A ' + log) self.out.nl() else: self.printAdded('\t\t[+] ') self.printText(log) self.out.nl() def ValidateOptions(self, opt, args): if not args or len(args) > 2: self.OptionParser.error('missing manifests to diff') def Execute(self, opt, args): self.out = _Coloring(self.manifest.globalConfig) 
self.printText = self.out.nofmt_printer('text') if opt.color: self.printProject = self.out.nofmt_printer('project', attr = 'bold') self.printAdded = self.out.nofmt_printer('green', fg = 'green', attr = 'bold') self.printRemoved = self.out.nofmt_printer('red', fg = 'red', attr = 'bold') self.printRevision = self.out.nofmt_printer('revision', fg = 'yellow') else: self.printProject = self.printAdded = self.printRemoved = self.printRevision = self.printText manifest1 = XmlManifest(self.manifest.repodir) manifest1.Override(args[0], load_local_manifests=False) if len(args) == 1: manifest2 = self.manifest else: manifest2 = XmlManifest(self.manifest.repodir) manifest2.Override(args[1], load_local_manifests=False) diff = manifest1.projectsDiff(manifest2) if opt.raw: self._printRawDiff(diff) else: self._printDiff(diff, color=opt.color, pretty_format=opt.pretty_format)
apache-2.0
endlessm/chromium-browser
third_party/skia/tools/skp/page_sets/skia_gmail_desktop.py
8
1699
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# pylint: disable=W0401,W0614

import os

from page_sets.login_helpers import google_login
from telemetry import story
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry.util import wpr_modes

# Single WPR archive shared by the page and its story set.
_ARCHIVE_DATA_FILE = 'data/skia_gmail_desktop.json'


class SkiaBuildbotDesktopPage(page_module.Page):
  """One desktop page, recorded/replayed for Skia's SKP capture bots."""

  def __init__(self, url, page_set):
    super(SkiaBuildbotDesktopPage, self).__init__(
        url=url,
        name=url,
        page_set=page_set,
        shared_page_state_class=shared_page_state.SharedDesktopPageState)
    self.archive_data_file = _ARCHIVE_DATA_FILE

  def RunSmoothness(self, action_runner):
    action_runner.ScrollElement()

  def RunNavigateSteps(self, action_runner):
    # Outside of replay mode the recorded cookies are not available, so log
    # into Google first; during replay the login traffic is already in the
    # archive.
    if self.wpr_mode != wpr_modes.WPR_REPLAY:
      page_set_dir = os.path.dirname(os.path.abspath(__file__))
      credentials_path = os.path.join(page_set_dir, 'data/credentials.json')
      google_login.BaseLoginGoogle(action_runner, 'google', credentials_path)
      action_runner.Wait(10)
    action_runner.Navigate(self.url)
    action_runner.Wait(10)


class SkiaGmailDesktopPageSet(story.StorySet):
  """ Pages designed to represent the median, not highly optimized web """

  def __init__(self):
    super(SkiaGmailDesktopPageSet, self).__init__(
        archive_data_file=_ARCHIVE_DATA_FILE)

    pages = (
        # Why: productivity, top google properties, long email .
        'https://mail.google.com/mail/?shva=1#inbox/13ba91194d0b8a2e',
    )
    for page_url in pages:
      self.AddStory(SkiaBuildbotDesktopPage(page_url, self))
bsd-3-clause
Bloodyaugust/pongsugarlabcpp
lib/boost/tools/build/src/util/path.py
6
30017
# Status: this module is ported on demand by however needs something
# from it.  Functionality that is not needed by Python port will
# be dropped.

# Copyright (C) Vladimir Prus 2002. Permission to copy, use, modify, sell and
# distribute this software is granted provided this copyright notice appears in
# all copies. This software is provided "as is" without express or implied
# warranty, and with no claim as to its suitability for any purpose.

#  Performs various path manipulations. Paths are always in a 'normalized'
#  representation. In it, a path may be either:
#
#     - '.', or
#
#     - ['/'] [ ( '..' '/' )* (token '/')* token ]
#
#   In plain english, path can be rooted, '..' elements are allowed only
#   at the beginning, and it never ends in slash, except for path consisting
#   of slash only.

import os.path

from utility import to_seq
from glob import glob as builtin_glob

from b2.util import bjam_signature


@bjam_signature((["path", "root"],))
def root(path, root):
    """If 'path' is relative, root it at 'root'; otherwise return it unchanged."""
    if os.path.isabs(path):
        return path
    else:
        return os.path.join(root, path)


@bjam_signature((["native"],))
def make(native):
    """Convert the native path into normalized form."""
    # TODO: make os selection here.
    return make_UNIX(native)


def make_UNIX(native):
    # VP: I have no idea how 'native' can be empty here! But it can!
    # NOTE: this assert disappears under 'python -O'; it documents an
    # invariant rather than validating external input.
    assert native
    return os.path.normpath(native)


@bjam_signature((["path"],))
def native(path):
    """Build a native representation of the path."""
    # TODO: make os selection here.
    return native_UNIX(path)


def native_UNIX(path):
    return path


def pwd():
    """Return the current working directory in normalized form.

    TODO: is it a good idea to use the current dir? Some use-cases
    may not allow us to depend on the current dir.
    """
    return make(os.getcwd())


def is_rooted(path):
    """Test if a path is rooted (starts with '/').

    Always returns a bool; the previous form returned '' for an empty
    path, which was only truthiness-equivalent.
    """
    return path.startswith('/')

###################################################################
# Still to port.
# Original lines are prefixed with "# " # # # Copyright (C) Vladimir Prus 2002. Permission to copy, use, modify, sell and # # distribute this software is granted provided this copyright notice appears in # # all copies. This software is provided "as is" without express or implied # # warranty, and with no claim as to its suitability for any purpose. # # # Performs various path manipulations. Path are always in a 'normilized' # # representation. In it, a path may be either: # # # # - '.', or # # # # - ['/'] [ ( '..' '/' )* (token '/')* token ] # # # # In plain english, path can be rooted, '..' elements are allowed only # # at the beginning, and it never ends in slash, except for path consisting # # of slash only. # # import modules ; # import sequence ; # import regex ; # import errors : error ; # # # os = [ modules.peek : OS ] ; # if [ modules.peek : UNIX ] # { # local uname = [ modules.peek : JAMUNAME ] ; # switch $(uname) # { # case CYGWIN* : # os = CYGWIN ; # # case * : # os = UNIX ; # } # } # # # # # Tests if a path is rooted. # # # rule is-rooted ( path ) # { # return [ MATCH "^(/)" : $(path) ] ; # } # # # # # Tests if a path has a parent. # # # rule has-parent ( path ) # { # if $(path) != / { # return 1 ; # } else { # return ; # } # } # # # # # Returns the path without any directory components. # # # rule basename ( path ) # { # return [ MATCH "([^/]+)$" : $(path) ] ; # } # # # # # Returns parent directory of the path. If no parent exists, error is issued. # # # rule parent ( path ) # { # if [ has-parent $(path) ] { # # if $(path) = . { # return .. ; # } else { # # # Strip everything at the end of path up to and including # # the last slash # local result = [ regex.match "((.*)/)?([^/]+)" : $(path) : 2 3 ] ; # # # Did we strip what we shouldn't? # if $(result[2]) = ".." { # return $(path)/.. ; # } else { # if ! $(result[1]) { # if [ is-rooted $(path) ] { # result = / ; # } else { # result = . 
# ;
#             }
#             return $(result[1]) ;
#         }
#     }
#     } else {
#         error "Path '$(path)' has no parent" ;
#     }
# }
#
#
# # Returns path2 such that "[ join path path2 ] = .".
# # The path may not contain ".." element or be rooted.
# #
# rule reverse ( path )
# {
#     if $(path) = .
#     {
#         return $(path) ;
#     }
#     else
#     {
#         local tokens = [ regex.split $(path) "/" ] ;
#         local tokens2 ;
#         for local i in $(tokens) {
#             tokens2 += .. ;
#         }
#         return [ sequence.join $(tokens2) : "/" ] ;
#     }
# }


def reverse(path):
    """Returns path2 such that `os.path.join(path, path2) == '.'`.

    `path` may not be rooted and may not contain a '..' *component*
    (a file name that merely embeds two dots, e.g. 'a..b', is allowed).

    Args:
        path (str): the path to reverse

    Returns:
        the string of the reversed path

    Example:
        >>> p1 = 'path/to/somewhere'
        >>> p2 = reverse('path/to/somewhere')
        >>> p2
        '../../..'
        >>> os.path.normpath(os.path.join(p1, p2))
        '.'
    """
    # BUG FIX: the previous guard used a substring test ("'..' in path"),
    # which wrongly rejected legitimate names such as 'a..b'.  Check the
    # slash-separated components instead.  The rootedness test is the
    # same '/'-prefix check is_rooted() performs.
    if path.startswith('/') or '..' in path.split('/'):
        from b2.manager import get_manager
        get_manager().errors()(
            'reverse(path): path is either rooted or contains ".." in the path')
    if path == '.':
        return path
    path = os.path.normpath(path)
    # os.sep.join() is being used over os.path.join() due
    # to an extra '..' that is created by os.path.join()
    return os.sep.join('..' for t in path.split(os.sep))


# # Auxillary rule: does all the semantic of 'join', except for error cheching.
# # The error checking is separated because this rule is recursive, and I don't
# # like the idea of checking the same input over and over.
# #
# local rule join-imp ( elements + )
# {
#     return [ NORMALIZE_PATH $(elements:J="/") ] ;
# }
#
# # Contanenates the passed path elements. Generates an error if
# # any element other than the first one is rooted.
# #
# rule join ( elements + )
# {
#     if !
$(elements[2]) # { # return $(elements[1]) ; # } # else # { # for local e in $(elements[2-]) # { # if [ is-rooted $(e) ] # { # error only first element may be rooted ; # } # } # return [ join-imp $(elements) ] ; # } # } def glob (dirs, patterns): """ Returns the list of files matching the given pattern in the specified directory. Both directories and patterns are supplied as portable paths. Each pattern should be non-absolute path, and can't contain "." or ".." elements. Each slash separated element of pattern can contain the following special characters: - '?', which match any character - '*', which matches arbitrary number of characters. A file $(d)/e1/e2/e3 (where 'd' is in $(dirs)) matches pattern p1/p2/p3 if and only if e1 matches p1, e2 matches p2 and so on. For example: [ glob . : *.cpp ] [ glob . : */build/Jamfile ] """ # { # local result ; # if $(patterns:D) # { # # When a pattern has a directory element, we first glob for # # directory, and then glob for file name is the found directories. # for local p in $(patterns) # { # # First glob for directory part. # local globbed-dirs = [ glob $(dirs) : $(p:D) ] ; # result += [ glob $(globbed-dirs) : $(p:D="") ] ; # } # } # else # { # # When a pattern has not directory, we glob directly. # # Take care of special ".." value. The "GLOB" rule simply ignores # # the ".." element (and ".") element in directory listings. This is # # needed so that # # # # [ glob libs/*/Jamfile ] # # # # don't return # # # # libs/../Jamfile (which is the same as ./Jamfile) # # # # On the other hand, when ".." is explicitly present in the pattern # # we need to return it. # # # for local dir in $(dirs) # { # for local p in $(patterns) # { # if $(p) != ".." # { # result += [ sequence.transform make # : [ GLOB [ native $(dir) ] : $(p) ] ] ; # } # else # { # result += [ path.join $(dir) .. ] ; # } # } # } # } # return $(result) ; # } # # TODO: (PF) I replaced the code above by this. I think it should work but needs to be tested. 
result = [] dirs = to_seq (dirs) patterns = to_seq (patterns) splitdirs = [] for dir in dirs: splitdirs += dir.split (os.pathsep) for dir in splitdirs: for pattern in patterns: p = os.path.join (dir, pattern) import glob result.extend (glob.glob (p)) return result # # Find out the absolute name of path and returns the list of all the parents, # starting with the immediate one. Parents are returned as relative names. # If 'upper_limit' is specified, directories above it will be pruned. # def all_parents(path, upper_limit=None, cwd=None): if not cwd: cwd = os.getcwd() path_abs = os.path.join(cwd, path) if upper_limit: upper_limit = os.path.join(cwd, upper_limit) result = [] while path_abs and path_abs != upper_limit: (head, tail) = os.path.split(path) path = os.path.join(path, "..") result.append(path) path_abs = head if upper_limit and path_abs != upper_limit: raise BaseException("'%s' is not a prefix of '%s'" % (upper_limit, path)) return result # Search for 'pattern' in parent directories of 'dir', up till and including # 'upper_limit', if it is specified, or till the filesystem root otherwise. # def glob_in_parents(dir, patterns, upper_limit=None): result = [] parent_dirs = all_parents(dir, upper_limit) for p in parent_dirs: result = glob(p, patterns) if result: break return result # # # # # Assuming 'child' is a subdirectory of 'parent', return the relative # # path from 'parent' to 'child' # # # rule relative ( child parent ) # { # if $(parent) = "." # { # return $(child) ; # } # else # { # local split1 = [ regex.split $(parent) / ] ; # local split2 = [ regex.split $(child) / ] ; # # while $(split1) # { # if $(split1[1]) = $(split2[1]) # { # split1 = $(split1[2-]) ; # split2 = $(split2[2-]) ; # } # else # { # errors.error $(child) is not a subdir of $(parent) ; # } # } # return [ join $(split2) ] ; # } # } # # # Returns the minimal path to path2 that is relative path1. 
# # # rule relative-to ( path1 path2 ) # { # local root_1 = [ regex.split [ reverse $(path1) ] / ] ; # local split1 = [ regex.split $(path1) / ] ; # local split2 = [ regex.split $(path2) / ] ; # # while $(split1) && $(root_1) # { # if $(split1[1]) = $(split2[1]) # { # root_1 = $(root_1[2-]) ; # split1 = $(split1[2-]) ; # split2 = $(split2[2-]) ; # } # else # { # split1 = ; # } # } # return [ join . $(root_1) $(split2) ] ; # } # Returns the list of paths which are used by the operating system # for looking up programs def programs_path (): raw = [] names = ['PATH', 'Path', 'path'] for name in names: raw.append(os.environ.get (name, '')) result = [] for elem in raw: if elem: for p in elem.split(os.path.pathsep): # it's possible that the user's Path has # double path separators, thus it is possible # for p to be an empty string. if p: result.append(make(p)) return result # rule make-NT ( native ) # { # local tokens = [ regex.split $(native) "[/\\]" ] ; # local result ; # # # Handle paths ending with slashes # if $(tokens[-1]) = "" # { # tokens = $(tokens[1--2]) ; # discard the empty element # } # # result = [ path.join $(tokens) ] ; # # if [ regex.match "(^.:)" : $(native) ] # { # result = /$(result) ; # } # # if $(native) = "" # { # result = "." 
; # } # # return $(result) ; # } # # rule native-NT ( path ) # { # local result = [ MATCH "^/?(.*)" : $(path) ] ; # result = [ sequence.join [ regex.split $(result) "/" ] : "\\" ] ; # return $(result) ; # } # # rule make-CYGWIN ( path ) # { # return [ make-NT $(path) ] ; # } # # rule native-CYGWIN ( path ) # { # local result = $(path) ; # if [ regex.match "(^/.:)" : $(path) ] # win absolute # { # result = [ MATCH "^/?(.*)" : $(path) ] ; # remove leading '/' # } # return [ native-UNIX $(result) ] ; # } # # # # # split-VMS: splits input native path into # # device dir file (each part is optional), # # example: # # # # dev:[dir]file.c => dev: [dir] file.c # # # rule split-path-VMS ( native ) # { # local matches = [ MATCH ([a-zA-Z0-9_-]+:)?(\\[[^\]]*\\])?(.*)?$ : $(native) ] ; # local device = $(matches[1]) ; # local dir = $(matches[2]) ; # local file = $(matches[3]) ; # # return $(device) $(dir) $(file) ; # } # # # # # Converts a native VMS path into a portable path spec. # # # # Does not handle current-device absolute paths such # # as "[dir]File.c" as it is not clear how to represent # # them in the portable path notation. # # # # Adds a trailing dot (".") to the file part if no extension # # is present (helps when converting it back into native path). # # # rule make-VMS ( native ) # { # if [ MATCH ^(\\[[a-zA-Z0-9]) : $(native) ] # { # errors.error "Can't handle default-device absolute paths: " $(native) ; # } # # local parts = [ split-path-VMS $(native) ] ; # local device = $(parts[1]) ; # local dir = $(parts[2]) ; # local file = $(parts[3]) ; # local elems ; # # if $(device) # { # # # # rooted # # # elems = /$(device) ; # } # # if $(dir) = "[]" # { # # # # Special case: current directory # # # elems = $(elems) "." ; # } # else if $(dir) # { # dir = [ regex.replace $(dir) "\\[|\\]" "" ] ; # local dir_parts = [ regex.split $(dir) \\. 
] ; # # if $(dir_parts[1]) = "" # { # # # # Relative path # # # dir_parts = $(dir_parts[2--1]) ; # } # # # # # replace "parent-directory" parts (- => ..) # # # dir_parts = [ regex.replace-list $(dir_parts) : - : .. ] ; # # elems = $(elems) $(dir_parts) ; # } # # if $(file) # { # if ! [ MATCH (\\.) : $(file) ] # { # # # # Always add "." to end of non-extension file # # # file = $(file). ; # } # elems = $(elems) $(file) ; # } # # local portable = [ path.join $(elems) ] ; # # return $(portable) ; # } # # # # # Converts a portable path spec into a native VMS path. # # # # Relies on having at least one dot (".") included in the file # # name to be able to differentiate it ftom the directory part. # # # rule native-VMS ( path ) # { # local device = "" ; # local dir = $(path) ; # local file = "" ; # local native ; # local split ; # # # # # Has device ? # # # if [ is-rooted $(dir) ] # { # split = [ MATCH ^/([^:]+:)/?(.*) : $(dir) ] ; # device = $(split[1]) ; # dir = $(split[2]) ; # } # # # # # Has file ? # # # # This is no exact science, just guess work: # # # # If the last part of the current path spec # # includes some chars, followed by a dot, # # optionally followed by more chars - # # then it is a file (keep your fingers crossed). # # # split = [ regex.split $(dir) / ] ; # local maybe_file = $(split[-1]) ; # # if [ MATCH ^([^.]+\\..*) : $(maybe_file) ] # { # file = $(maybe_file) ; # dir = [ sequence.join $(split[1--2]) : / ] ; # } # # # # # Has dir spec ? # # # if $(dir) = "." # { # dir = "[]" ; # } # else if $(dir) # { # dir = [ regex.replace $(dir) \\.\\. - ] ; # dir = [ regex.replace $(dir) / . 
] ; # # if $(device) = "" # { # # # # Relative directory # # # dir = "."$(dir) ; # } # dir = "["$(dir)"]" ; # } # # native = [ sequence.join $(device) $(dir) $(file) ] ; # # return $(native) ; # } # # # rule __test__ ( ) { # # import assert ; # import errors : try catch ; # # assert.true is-rooted "/" ; # assert.true is-rooted "/foo" ; # assert.true is-rooted "/foo/bar" ; # assert.result : is-rooted "." ; # assert.result : is-rooted "foo" ; # assert.result : is-rooted "foo/bar" ; # # assert.true has-parent "foo" ; # assert.true has-parent "foo/bar" ; # assert.true has-parent "." ; # assert.result : has-parent "/" ; # # assert.result "." : basename "." ; # assert.result ".." : basename ".." ; # assert.result "foo" : basename "foo" ; # assert.result "foo" : basename "bar/foo" ; # assert.result "foo" : basename "gaz/bar/foo" ; # assert.result "foo" : basename "/gaz/bar/foo" ; # # assert.result "." : parent "foo" ; # assert.result "/" : parent "/foo" ; # assert.result "foo/bar" : parent "foo/bar/giz" ; # assert.result ".." : parent "." ; # assert.result ".." : parent "../foo" ; # assert.result "../../foo" : parent "../../foo/bar" ; # # # assert.result "." : reverse "." ; # assert.result ".." : reverse "foo" ; # assert.result "../../.." : reverse "foo/bar/giz" ; # # assert.result "foo" : join "foo" ; # assert.result "/foo" : join "/" "foo" ; # assert.result "foo/bar" : join "foo" "bar" ; # assert.result "foo/bar" : join "foo/giz" "../bar" ; # assert.result "foo/giz" : join "foo/bar/baz" "../../giz" ; # assert.result ".." : join "." ".." ; # assert.result ".." : join "foo" "../.." ; # assert.result "../.." : join "../foo" "../.." ; # assert.result "/foo" : join "/bar" "../foo" ; # assert.result "foo/giz" : join "foo/giz" "." ; # assert.result "." : join lib2 ".." ; # assert.result "/" : join "/a" ".." ; # # assert.result /a/b : join /a/b/c .. ; # # assert.result "foo/bar/giz" : join "foo" "bar" "giz" ; # assert.result "giz" : join "foo" ".." 
"giz" ; # assert.result "foo/giz" : join "foo" "." "giz" ; # # try ; # { # join "a" "/b" ; # } # catch only first element may be rooted ; # # local CWD = "/home/ghost/build" ; # assert.result : all-parents . : . : $(CWD) ; # assert.result . .. ../.. ../../.. : all-parents "Jamfile" : "" : $(CWD) ; # assert.result foo . .. ../.. ../../.. : all-parents "foo/Jamfile" : "" : $(CWD) ; # assert.result ../Work .. ../.. ../../.. : all-parents "../Work/Jamfile" : "" : $(CWD) ; # # local CWD = "/home/ghost" ; # assert.result . .. : all-parents "Jamfile" : "/home" : $(CWD) ; # assert.result . : all-parents "Jamfile" : "/home/ghost" : $(CWD) ; # # assert.result "c/d" : relative "a/b/c/d" "a/b" ; # assert.result "foo" : relative "foo" "." ; # # local save-os = [ modules.peek path : os ] ; # modules.poke path : os : NT ; # # assert.result "foo/bar/giz" : make "foo/bar/giz" ; # assert.result "foo/bar/giz" : make "foo\\bar\\giz" ; # assert.result "foo" : make "foo/." ; # assert.result "foo" : make "foo/bar/.." ; # assert.result "/D:/My Documents" : make "D:\\My Documents" ; # assert.result "/c:/boost/tools/build/new/project.jam" : make "c:\\boost\\tools\\build\\test\\..\\new\\project.jam" ; # # assert.result "foo\\bar\\giz" : native "foo/bar/giz" ; # assert.result "foo" : native "foo" ; # assert.result "D:\\My Documents\\Work" : native "/D:/My Documents/Work" ; # # modules.poke path : os : UNIX ; # # assert.result "foo/bar/giz" : make "foo/bar/giz" ; # assert.result "/sub1" : make "/sub1/." ; # assert.result "/sub1" : make "/sub1/sub2/.." ; # assert.result "sub1" : make "sub1/." ; # assert.result "sub1" : make "sub1/sub2/.." 
; # assert.result "/foo/bar" : native "/foo/bar" ; # # modules.poke path : os : VMS ; # # # # # Don't really need to poke os before these # # # assert.result "disk:" "[dir]" "file" : split-path-VMS "disk:[dir]file" ; # assert.result "disk:" "[dir]" "" : split-path-VMS "disk:[dir]" ; # assert.result "disk:" "" "" : split-path-VMS "disk:" ; # assert.result "disk:" "" "file" : split-path-VMS "disk:file" ; # assert.result "" "[dir]" "file" : split-path-VMS "[dir]file" ; # assert.result "" "[dir]" "" : split-path-VMS "[dir]" ; # assert.result "" "" "file" : split-path-VMS "file" ; # assert.result "" "" "" : split-path-VMS "" ; # # # # # Special case: current directory # # # assert.result "" "[]" "" : split-path-VMS "[]" ; # assert.result "disk:" "[]" "" : split-path-VMS "disk:[]" ; # assert.result "" "[]" "file" : split-path-VMS "[]file" ; # assert.result "disk:" "[]" "file" : split-path-VMS "disk:[]file" ; # # # # # Make portable paths # # # assert.result "/disk:" : make "disk:" ; # assert.result "foo/bar/giz" : make "[.foo.bar.giz]" ; # assert.result "foo" : make "[.foo]" ; # assert.result "foo" : make "[.foo.bar.-]" ; # assert.result ".." : make "[.-]" ; # assert.result ".." : make "[-]" ; # assert.result "." : make "[]" ; # assert.result "giz.h" : make "giz.h" ; # assert.result "foo/bar/giz.h" : make "[.foo.bar]giz.h" ; # assert.result "/disk:/my_docs" : make "disk:[my_docs]" ; # assert.result "/disk:/boost/tools/build/new/project.jam" : make "disk:[boost.tools.build.test.-.new]project.jam" ; # # # # # Special case (adds '.' to end of file w/o extension to # # disambiguate from directory in portable path spec). # # # assert.result "Jamfile." : make "Jamfile" ; # assert.result "dir/Jamfile." : make "[.dir]Jamfile" ; # assert.result "/disk:/dir/Jamfile." 
: make "disk:[dir]Jamfile" ; # # # # # Make native paths # # # assert.result "disk:" : native "/disk:" ; # assert.result "[.foo.bar.giz]" : native "foo/bar/giz" ; # assert.result "[.foo]" : native "foo" ; # assert.result "[.-]" : native ".." ; # assert.result "[.foo.-]" : native "foo/.." ; # assert.result "[]" : native "." ; # assert.result "disk:[my_docs.work]" : native "/disk:/my_docs/work" ; # assert.result "giz.h" : native "giz.h" ; # assert.result "disk:Jamfile." : native "/disk:Jamfile." ; # assert.result "disk:[my_docs.work]Jamfile." : native "/disk:/my_docs/work/Jamfile." ; # # modules.poke path : os : $(save-os) ; # # } # #def glob(dir, patterns): # result = [] # for pattern in patterns: # result.extend(builtin_glob(os.path.join(dir, pattern))) # return result def glob(dirs, patterns, exclude_patterns=None): """Returns the list of files matching the given pattern in the specified directory. Both directories and patterns are supplied as portable paths. Each pattern should be non-absolute path, and can't contain '.' or '..' elements. Each slash separated element of pattern can contain the following special characters: - '?', which match any character - '*', which matches arbitrary number of characters. A file $(d)/e1/e2/e3 (where 'd' is in $(dirs)) matches pattern p1/p2/p3 if and only if e1 matches p1, e2 matches p2 and so on. For example: [ glob . : *.cpp ] [ glob . 
: */build/Jamfile ] """ assert(isinstance(patterns, list)) assert(isinstance(dirs, list)) if not exclude_patterns: exclude_patterns = [] else: assert(isinstance(exclude_patterns, list)) real_patterns = [os.path.join(d, p) for p in patterns for d in dirs] real_exclude_patterns = [os.path.join(d, p) for p in exclude_patterns for d in dirs] inc = [os.path.normpath(name) for p in real_patterns for name in builtin_glob(p)] exc = [os.path.normpath(name) for p in real_exclude_patterns for name in builtin_glob(p)] return [x for x in inc if x not in exc] def glob_tree(roots, patterns, exclude_patterns=None): """Recursive version of GLOB. Builds the glob of files while also searching in the subdirectories of the given roots. An optional set of exclusion patterns will filter out the matching entries from the result. The exclusions also apply to the subdirectory scanning, such that directories that match the exclusion patterns will not be searched.""" if not exclude_patterns: exclude_patterns = [] result = glob(roots, patterns, exclude_patterns) subdirs = [s for s in glob(roots, ["*"]) if s != "." and s != ".." and os.path.isdir(s)] if subdirs: result.extend(glob_tree(subdirs, patterns, exclude_patterns)) return result def glob_in_parents(dir, patterns, upper_limit=None): """Recursive version of GLOB which glob sall parent directories of dir until the first match is found. 
Returns an empty result if no match is found""" assert(isinstance(dir, str)) assert(isinstance(patterns, list)) result = [] absolute_dir = os.path.join(os.getcwd(), dir) absolute_dir = os.path.normpath(absolute_dir) while absolute_dir: new_dir = os.path.split(absolute_dir)[0] if new_dir == absolute_dir: break result = glob([new_dir], patterns) if result: break absolute_dir = new_dir return result # The relpath functionality is written by # Cimarron Taylor def split(p, rest=[]): (h,t) = os.path.split(p) if len(h) < 1: return [t]+rest if len(t) < 1: return [h]+rest return split(h,[t]+rest) def commonpath(l1, l2, common=[]): if len(l1) < 1: return (common, l1, l2) if len(l2) < 1: return (common, l1, l2) if l1[0] != l2[0]: return (common, l1, l2) return commonpath(l1[1:], l2[1:], common+[l1[0]]) def relpath(p1, p2): (common,l1,l2) = commonpath(split(p1), split(p2)) p = [] if len(l1) > 0: p = [ '../' * len(l1) ] p = p + l2 if p: return os.path.join( *p ) else: return "."
gpl-2.0
tfhq/googletest
test/gtest_help_test.py
2968
5856
#!/usr/bin/env python # # Copyright 2009, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Tests the --help flag of Google C++ Testing Framework. SYNOPSIS gtest_help_test.py --build_dir=BUILD/DIR # where BUILD/DIR contains the built gtest_help_test_ file. 
gtest_help_test.py """ __author__ = 'wan@google.com (Zhanyong Wan)' import os import re import gtest_test_utils IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux' IS_WINDOWS = os.name == 'nt' PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_') FLAG_PREFIX = '--gtest_' DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style' STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to' UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing' LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests' INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG), re.sub('^--', '/', LIST_TESTS_FLAG), re.sub('_', '-', LIST_TESTS_FLAG)] INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing' SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess( [PROGRAM_PATH, LIST_TESTS_FLAG]).output # The help message must match this regex. HELP_REGEX = re.compile( FLAG_PREFIX + r'list_tests.*' + FLAG_PREFIX + r'filter=.*' + FLAG_PREFIX + r'also_run_disabled_tests.*' + FLAG_PREFIX + r'repeat=.*' + FLAG_PREFIX + r'shuffle.*' + FLAG_PREFIX + r'random_seed=.*' + FLAG_PREFIX + r'color=.*' + FLAG_PREFIX + r'print_time.*' + FLAG_PREFIX + r'output=.*' + FLAG_PREFIX + r'break_on_failure.*' + FLAG_PREFIX + r'throw_on_failure.*' + FLAG_PREFIX + r'catch_exceptions=0.*', re.DOTALL) def RunWithFlag(flag): """Runs gtest_help_test_ with the given flag. Returns: the exit code and the text output as a tuple. Args: flag: the command-line flag to pass to gtest_help_test_, or None. """ if flag is None: command = [PROGRAM_PATH] else: command = [PROGRAM_PATH, flag] child = gtest_test_utils.Subprocess(command) return child.exit_code, child.output class GTestHelpTest(gtest_test_utils.TestCase): """Tests the --help flag and its equivalent forms.""" def TestHelpFlag(self, flag): """Verifies correct behavior when help flag is specified. The right message must be printed and the tests must skipped when the given flag is specified. 
Args: flag: A flag to pass to the binary or None. """ exit_code, output = RunWithFlag(flag) self.assertEquals(0, exit_code) self.assert_(HELP_REGEX.search(output), output) if IS_LINUX: self.assert_(STREAM_RESULT_TO_FLAG in output, output) else: self.assert_(STREAM_RESULT_TO_FLAG not in output, output) if SUPPORTS_DEATH_TESTS and not IS_WINDOWS: self.assert_(DEATH_TEST_STYLE_FLAG in output, output) else: self.assert_(DEATH_TEST_STYLE_FLAG not in output, output) def TestNonHelpFlag(self, flag): """Verifies correct behavior when no help flag is specified. Verifies that when no help flag is specified, the tests are run and the help message is not printed. Args: flag: A flag to pass to the binary or None. """ exit_code, output = RunWithFlag(flag) self.assert_(exit_code != 0) self.assert_(not HELP_REGEX.search(output), output) def testPrintsHelpWithFullFlag(self): self.TestHelpFlag('--help') def testPrintsHelpWithShortFlag(self): self.TestHelpFlag('-h') def testPrintsHelpWithQuestionFlag(self): self.TestHelpFlag('-?') def testPrintsHelpWithWindowsStyleQuestionFlag(self): self.TestHelpFlag('/?') def testPrintsHelpWithUnrecognizedGoogleTestFlag(self): self.TestHelpFlag(UNKNOWN_FLAG) def testPrintsHelpWithIncorrectFlagStyle(self): for incorrect_flag in INCORRECT_FLAG_VARIANTS: self.TestHelpFlag(incorrect_flag) def testRunsTestsWithoutHelpFlag(self): """Verifies that when no help flag is specified, the tests are run and the help message is not printed.""" self.TestNonHelpFlag(None) def testRunsTestsWithGtestInternalFlag(self): """Verifies that the tests are run and no help message is printed when a flag starting with Google Test prefix and 'internal_' is supplied.""" self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING) if __name__ == '__main__': gtest_test_utils.Main()
bsd-3-clause
YourCyborg/Sun-RPI
src/objects/admin.py
1
5428
# # This sets up how models are displayed # in the web admin interface. # from django import forms from django.conf import settings from django.contrib import admin from src.objects.models import ObjAttribute, ObjectDB, ObjectNick, Alias from src.utils.utils import mod_import class ObjAttributeInline(admin.TabularInline): model = ObjAttribute fields = ('db_key', 'db_value') extra = 0 class NickInline(admin.TabularInline): model = ObjectNick fields = ('db_nick', 'db_real', 'db_type') extra = 0 class AliasInline(admin.TabularInline): model = Alias fields = ("db_key",) extra = 0 class ObjectCreateForm(forms.ModelForm): "This form details the look of the fields" class Meta: model = ObjectDB db_key = forms.CharField(label="Name/Key", widget=forms.TextInput(attrs={'size':'78'}), help_text="Main identifier, like 'apple', 'strong guy', 'Elizabeth' etc. If creating a Character, check so the name is unique among characters!",) db_typeclass_path = forms.CharField(label="Typeclass",initial="Change to (for example) %s or %s." % (settings.BASE_OBJECT_TYPECLASS, settings.BASE_CHARACTER_TYPECLASS), widget=forms.TextInput(attrs={'size':'78'}), help_text="This defines what 'type' of entity this is. This variable holds a Python path to a module with a valid Evennia Typeclass. If you are creating a Character you should use the typeclass defined by settings.BASE_CHARACTER_TYPECLASS or one derived from that.") db_permissions = forms.CharField(label="Permissions", initial=settings.PERMISSION_PLAYER_DEFAULT, required=False, widget=forms.TextInput(attrs={'size':'78'}), help_text="a comma-separated list of text strings checked by certain locks. They are mainly of use for Character objects. Character permissions overload permissions defined on a controlling Player. 
Most objects normally don't have any permissions defined.") db_cmdset_storage = forms.CharField(label="CmdSet", initial=settings.CMDSET_DEFAULT, required=False, widget=forms.TextInput(attrs={'size':'78'}), help_text="Most non-character objects don't need a cmdset and can leave this field blank.") class ObjectEditForm(ObjectCreateForm): "Form used for editing. Extends the create one with more fields" db_lock_storage = forms.CharField(label="Locks", required=False, widget=forms.Textarea(attrs={'cols':'100', 'rows':'2'}), help_text="In-game lock definition string. If not given, defaults will be used. This string should be on the form <i>type:lockfunction(args);type2:lockfunction2(args);...") class ObjectDBAdmin(admin.ModelAdmin): list_display = ('id', 'db_key', 'db_location', 'db_player', 'db_typeclass_path') list_display_links = ('id', 'db_key') ordering = ['db_player', 'db_typeclass_path', 'id'] search_fields = ['^db_key', 'db_typeclass_path'] save_as = True save_on_top = True list_select_related = True list_filter = ('db_permissions', 'db_location', 'db_typeclass_path') # editing fields setup form = ObjectEditForm fieldsets = ( (None, { 'fields': (('db_key','db_typeclass_path'), ('db_permissions', 'db_lock_storage'), ('db_location', 'db_home'), 'db_destination','db_cmdset_storage' )}), ) #deactivated temporarily, they cause empty objects to be created in admin inlines = [AliasInline]#, ObjAttributeInline] # Custom modification to give two different forms wether adding or not. 
add_form = ObjectCreateForm add_fieldsets = ( (None, { 'fields': (('db_key','db_typeclass_path'), 'db_permissions', ('db_location', 'db_home'), 'db_destination','db_cmdset_storage' )}), ) def get_fieldsets(self, request, obj=None): if not obj: return self.add_fieldsets return super(ObjectDBAdmin, self).get_fieldsets(request, obj) def get_form(self, request, obj=None, **kwargs): """ Use special form during creation """ defaults = {} if obj is None: defaults.update({ 'form': self.add_form, 'fields': admin.util.flatten_fieldsets(self.add_fieldsets), }) defaults.update(kwargs) return super(ObjectDBAdmin, self).get_form(request, obj, **defaults) def save_model(self, request, obj, form, change): if not change: # adding a new object obj = obj.typeclass obj.basetype_setup() obj.basetype_posthook_setup() obj.at_object_creation() obj.at_init() admin.site.register(ObjectDB, ObjectDBAdmin)
bsd-3-clause
shimlee/mptcp_magw
tools/perf/scripts/python/net_dropmonitor.py
2669
1738
# Monitor the system for dropped packets and proudce a report of drop locations and counts import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * drop_log = {} kallsyms = [] def get_kallsyms_table(): global kallsyms try: f = open("/proc/kallsyms", "r") except: return for line in f: loc = int(line.split()[0], 16) name = line.split()[2] kallsyms.append((loc, name)) kallsyms.sort() def get_sym(sloc): loc = int(sloc) # Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start # kallsyms[i][0] > loc for all end <= i < len(kallsyms) start, end = -1, len(kallsyms) while end != start + 1: pivot = (start + end) // 2 if loc < kallsyms[pivot][0]: end = pivot else: start = pivot # Now (start == -1 or kallsyms[start][0] <= loc) # and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0]) if start >= 0: symloc, name = kallsyms[start] return (name, loc - symloc) else: return (None, 0) def print_drop_table(): print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT") for i in drop_log.keys(): (sym, off) = get_sym(i) if sym == None: sym = i print "%25s %25s %25s" % (sym, off, drop_log[i]) def trace_begin(): print "Starting trace (Ctrl-C to dump results)" def trace_end(): print "Gathering kallsyms data" get_kallsyms_table() print_drop_table() # called from perf, when it finds a correspoinding event def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr, location, protocol): slocation = str(location) try: drop_log[slocation] = drop_log[slocation] + 1 except: drop_log[slocation] = 1
gpl-2.0
dgutman/ADRCPathViewer
api/routes/v1/Tile.py
1
1783
from flask_restful import Resource from flask import Response from bson.objectid import ObjectId from bson.json_util import dumps from utils.deepzoom import get_slide, PILBytesIO class Tile(Resource): def __init__(self, db, config): """initialize DeepZoom resource Args: db: mongo db connection config: application configurations opt: deep zoom configurations Returns: None """ self.db = db self.config = config self.slides = self.db[self.config["db_collection"]] def get(self, id, level, x, y): """ Get slide tile --- tags: - Tile parameters: - in: path name: id description: MonogDB ObjectId appended to it the level -- Example 57bf3c092f9b2e1595b29730 type: string - in: path name: level description: The zoom level type: integer - in: path name: x description: The column type: integer - in: path name: y description: The row type: integer responses: 200: description: Returns the slide information 404: description: Invalid slide Id or slide not found """ if not ObjectId.is_valid(id): resp = {"status": 404, "message": "Invalid slide Id " + id} return Response(dumps(resp), status=404, mimetype='application/json') image = self.slides.find_one({'_id': ObjectId(id)}) path = image["path"] slide = get_slide(path) try: tile = slide.get_tile(level, (x, y)) buf = PILBytesIO() tile.save(buf, 'jpeg', quality=90) return Response(buf.getvalue(), status=200, mimetype='image/jpeg') except ValueError: Response(None, status=404)
mit
NikolaYolov/invenio_backup
modules/bibconvert/lib/bibconvert_bfx_engine.py
17
10494
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.

"""
bibconvert_bfx_engine - XML processing library for Invenio
using bfx stylesheets.

Does almost what an XSLT processor does, but using a special
syntax for the transformation stylesheet: a combination of
'BibFormat for XML' (bibformat bfx) templates and XPath is used.

Dependencies: bibformat_bfx_engine.py

Used by: bibconvert.in
"""

__revision__ = "$Id$"

import sys
import os
from cStringIO import StringIO

# Which XPath implementation is available: -1 means none, 0 means PyXML.
processor_type = -1
try:
    # Try to load the PyXML XPath implementation.
    from xml.xpath import Evaluate
    from xml.dom import minidom, Node
    from xml.xpath.Context import Context
    processor_type = 0
except ImportError:
    pass

# TODO: Try to explicitely load 4suite Xpath
# <http://4suite.org/docs/howto/UNIX.xml#PyXML>
# From <http://uche.ogbuji.net/tech/akara/nodes/2003-01-01/basic-xpath>:
## 1. PyXML usage (do not use with 4Suite)
##      * import xml.xslt
##      * import xml.xpath
## 2. 4Suite usage (use these imports)
##      * import Ft.Xml.XPath
##      * import Ft.Xml.Xslt

from invenio import bibformat_bfx_engine
from invenio.config import CFG_ETCDIR

# Standard directory holding the bfx transformation templates.
CFG_BFX_TEMPLATES_PATH = "%s%sbibconvert%sconfig" % (CFG_ETCDIR, os.sep, os.sep)


def convert(xmltext, template_filename=None, template_source=None):
    """
    Processes an XML text according to a template, and returns the result.

    The template can be given either by name (or by path) or by source.
    If source is given, name is ignored.

    bibconvert_bfx_engine will look for template_filename in standard
    directories for templates. If not found, template_filename will be
    assumed to be a path to a template. If none can be found, return None.

    Raises an exception if cannot find an appropriate XPath module.

    @param xmltext: The string representation of the XML to process
    @param template_filename: The name of the template to use for the processing
    @param template_source: The configuration describing the processing.
    @return: the transformed XML text.
    """
    if processor_type == -1:
        # Bug fix: the original raised a plain string, which is not a valid
        # exception (string exceptions were removed from Python).
        raise RuntimeError("No XPath processor could be found")

    # Retrieve template and read it
    if template_source:
        template = template_source
    elif template_filename:
        try:
            path_to_templates = (CFG_BFX_TEMPLATES_PATH + os.sep +
                                 template_filename)
            if os.path.exists(path_to_templates):
                template = open(path_to_templates).read()
            elif os.path.exists(template_filename):
                template = open(template_filename).read()
            else:
                sys.stderr.write(template_filename + ' does not exist.')
                return None
        except IOError:
            sys.stderr.write(template_filename + ' could not be read.')
            return None
    else:
        # Bug fix: the original concatenated template_filename (None in this
        # branch) with a string, raising TypeError instead of reporting the
        # missing template.
        sys.stderr.write('No template filename or source was given.')
        return None

    # Prepare some variables
    out_file = StringIO()  # Virtual file-like object to write result in
    trans = XML2XMLTranslator()
    trans.set_xml_source(xmltext)
    parser = bibformat_bfx_engine.BFXParser(trans)

    # Loading the template might print some info; redirect it to stderr so
    # nothing pollutes standard output.  Robustness fix: restore stdout in a
    # 'finally' clause so a failing load cannot leave it redirected.
    standard_output = sys.stdout
    sys.stdout = sys.stderr
    try:
        # Always set 'template_name' to None, otherwise bibformat for XML
        # will look for it in the wrong directory.
        template_tree = parser.load_template(template_name=None,
                                             template_source=template)
    finally:
        sys.stdout = standard_output

    # Transform the source using the loaded template
    parser.walk(template_tree, out_file)
    return out_file.getvalue()


class XML2XMLTranslator:
    """
    Generic translator for XML.
    """

    def __init__(self):
        '''
        Create an instance of the translator and init with the list of
        the defined labels and their rules.
        '''
        self.xml_source = ''     # raw XML text last given to set_xml_source
        self.dom = None          # parsed minidom document
        self.current_node = None  # XPath evaluation context node
        self.namespaces = {}     # prefix -> namespace URI mapping

    def is_defined(self, name):
        '''
        Check whether a variable is defined.

        Accept all names; get_value will return an empty string for names
        that do not exist (an actual XPath probe is intentionally skipped).

        @param name: the name of the variable
        '''
        return True

    def get_num_elements(self, name):
        '''
        An API function to get the number of elements for a variable.
        Do not use this function to build loops, use iterator instead.
        '''
        context = Context(self.current_node, processorNss=self.namespaces)
        results_list = Evaluate(name, context=context)
        return len(results_list)

    def get_value(self, name, display_type='value'):
        '''
        The API function for quering the translator for values of a certain
        variable.  Called in a loop will result in a different value each time.

        @param name: the name of the variable you want the value of
        @param display_type: an optional value for the type of the desired
               output, one of: value, tag, ind1, ind2, code, fulltag; these
               can be easily added in the proper place of the code
               (display_value)
        '''
        context = Context(self.current_node, processorNss=self.namespaces)
        results_list = Evaluate(name, context=context)
        if len(results_list) == 0:
            return ''
        # Select text node value of selected nodes and concatenate.
        # Robustness fix: skip elements without any child node instead of
        # raising IndexError on empty elements.
        values = []
        for node in results_list:
            if node.childNodes:
                values.append(node.childNodes[0].nodeValue.encode("utf-8"))
        return ' '.join(values)

    def iterator(self, name):
        '''
        An iterator over the values of a certain name.

        The iterator changes state of internal variables and objects:
        while iterating, self.current_node is set to each matched node in
        turn, so calling get_value in the loop yields a different value
        each time.  The previous context node is restored afterwards.
        '''
        saved_node = self.current_node
        context = Context(self.current_node, processorNss=self.namespaces)
        results_list = Evaluate(name, context=context)
        for node in results_list:
            self.current_node = node
            yield node
        self.current_node = saved_node

    def call_function(self, function_name, parameters=None):
        '''
        Call an external element which is a Python file, using BibFormat.

        There is no support for this in bibconvert_bfx_engine, so an empty
        string is always returned.

        @param function_name: the name of the function to call
        @param parameters: a dictionary of the parameters to pass as
               key=value pairs
        @return: a string value, which is the result of the function call
        '''
        return ""

    def set_xml_source(self, xmltext):
        """
        Specify the source XML for this transformer.

        @param xmltext: the XML text representation to use as source
        """
        self.xml_source = xmltext
        self.dom = minidom.parseString(xmltext)
        self.current_node = self.dom
        # Namespaces must be collected up front for XPath queries to resolve
        # prefixed names correctly.
        self.namespaces = build_namespaces(self.dom)


def doc_order_iter_filter(node, filter_func):
    """
    Iterates over each node in document order, applying the filter function
    to each in turn, starting with the given node, and yielding each node in
    cases where the filter function computes true.

    @param node: the starting point (subtree rooted at node will be iterated
           over document order)
    @param filter_func: a callable object taking a node and returning true
           or false
    """
    if filter_func(node):
        yield node
    for child in node.childNodes:
        for cn in doc_order_iter_filter(child, filter_func):
            yield cn
    return


def get_all_elements(node):
    """
    Returns an iterator (using document order) over all element nodes that
    are descendants of the given one.
    """
    return doc_order_iter_filter(
        node, lambda n: n.nodeType == Node.ELEMENT_NODE)


def build_namespaces(dom):
    """
    Build the namespaces present in dom tree.

    Necessary to use prior processing an XML file in order to execute XPath
    queries correctly.

    @param dom: the dom tree to parse to discover namespaces
    @return: a dictionary with prefix as key and namespace as value
    """
    namespaces = {}
    for elem in get_all_elements(dom):
        if elem.prefix is not None:
            namespaces[elem.prefix] = elem.namespaceURI
        for attr in elem.attributes.values():
            if attr.prefix is not None:
                namespaces[attr.prefix] = attr.namespaceURI
    return namespaces


def bc_profile():
    """
    Runs a benchmark
    """
    global xmltext
    convert(xmltext, 'oaidc2marcxml.bfx')
    return


def benchmark():
    """
    Benchmark the module, using profile and pstats
    """
    import profile
    import pstats
    from invenio.bibformat import record_get_xml

    global xmltext
    xmltext = record_get_xml(10, 'oai_dc')
    profile.run('bc_profile()', "bibconvert_xslt_profile")
    p = pstats.Stats("bibconvert_xslt_profile")
    p.strip_dirs().sort_stats("cumulative").print_stats()


if __name__ == "__main__":
    # FIXME: Implement command line options
    pass
gpl-2.0